ngram
listlengths
0
67.8k
[ "= os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path =", "os import torch from torch import nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset", "model = torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model,", "import LMDataset from src.train.eval import eval from src.utils.constants import PAD_INDEX from src.utils.logger import", "'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader =", "processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path", "from src.data_process.dataset import LMDataset from src.train.eval import eval from src.utils.constants import PAD_INDEX from", "= Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True )", "src.train.eval import eval from src.utils.constants import PAD_INDEX from src.utils.logger import Logger def test(args):", "import eval from src.utils.constants import PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES']", "batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX)", "os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader", "nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss: %.4f\\ttest_ppl: %.4f' % (test_loss, 
test_ppl))", "import nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from src.train.eval import", "LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model", "= DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model = model.cuda()", "torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion)", "import PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path =", "test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path", ") model = torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl =", "= os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path)", "= os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data,", "logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True", "os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path,", "Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model", "from torch import 
nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from", "DataLoader from src.data_process.dataset import LMDataset from src.train.eval import eval from src.utils.constants import PAD_INDEX", "log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader(", "def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed')", "torch import nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from src.train.eval", "src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path", "pin_memory=True ) model = torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl", "from src.utils.constants import PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)", "os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path =", "import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path =", "model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss:", "criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss: %.4f\\ttest_ppl: %.4f' %", "from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data)", "eval from 
src.utils.constants import PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] =", "os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size,", "= os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path =", "os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path,", "os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path,", "= model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss: %.4f\\ttest_ppl:", "= torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader,", "PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data',", "str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz')", "os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path)", "nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from src.train.eval import eval", "Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path = os.path.join('./data', args.data) 
processed_base_path = os.path.join(base_path,", "torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from src.train.eval import eval from src.utils.constants", "'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt')", "import DataLoader from src.data_process.dataset import LMDataset from src.train.eval import eval from src.utils.constants import", "model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss: %.4f\\ttest_ppl: %.4f'", "log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data =", "save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger", "base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path", "shuffle=False, pin_memory=True ) model = torch.load(save_path) model = model.cuda() criterion = nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss,", "'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger = Logger(log_path) test_data", "src.utils.constants import PAD_INDEX from src.utils.logger import Logger def test(args): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) base_path", "LMDataset from src.train.eval import eval from src.utils.constants import PAD_INDEX from src.utils.logger import Logger", "test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model =", "'test_log.txt') logger = Logger(log_path) test_data = 
LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False,", "test_data = LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model =", "= os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path = os.path.join(log_base_path, 'test_log.txt') logger =", "= nn.CrossEntropyLoss(ignore_index=PAD_INDEX) test_loss, test_ppl = eval(model, test_loader, criterion) logger.log('test_loss: %.4f\\ttest_ppl: %.4f' % (test_loss,", "src.data_process.dataset import LMDataset from src.train.eval import eval from src.utils.constants import PAD_INDEX from src.utils.logger", "DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model = model.cuda() criterion", "dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = torch.load(save_path) model = model.cuda() criterion =", "from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset from src.train.eval import eval from", "'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log')", "import torch from torch import nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import", "args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl')", "torch from torch import nn from torch.utils.data.dataloader import DataLoader from src.data_process.dataset import LMDataset", "= LMDataset(processed_test_path) test_loader = DataLoader( dataset=test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True ) model = 
torch.load(save_path)", "import os import torch from torch import nn from torch.utils.data.dataloader import DataLoader from", "= str(args.gpu) base_path = os.path.join('./data', args.data) processed_base_path = os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path,", "from src.train.eval import eval from src.utils.constants import PAD_INDEX from src.utils.logger import Logger def", "<filename>src/train/test.py<gh_stars>1-10 import os import torch from torch import nn from torch.utils.data.dataloader import DataLoader", "= os.path.join(base_path, 'processed') processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path =", "processed_test_path = os.path.join(processed_base_path, 'test.npz') save_path = os.path.join(processed_base_path, 'rnnlm.pkl') log_base_path = os.path.join(base_path, 'log') log_path" ]
[ "resp = '' result_size = 0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(),", "'', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return", "fallback_json result_size = 0 return result_size def run_log(query_line, last_timestamp): # open queries and", "= -1 if exec_time != -1 and len(query_times) == 11: #and result_size >", "in results: # for entry in results.get(): if entry is not None: out.write(str(entry))", "as pbar: count = 0. last_timestamp = datetime.utcnow() for l_ in in_: count", "len(url_) == 1: request_url = url_[0] query_times = [] resp = '' result_size", "delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) + '\\t' +", "last_timestamp) if len(results) > 40000: break if count == 19: count = 0", "datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response, exec_time", "log-url into readable sparql query Keyword-args: query -- log-url to clean ''' line_no_tabs", "-1 if exec_time != -1 and len(query_times) == 11: #and result_size > 0:", "11: #and result_size > 0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean =", "open(log_file) as in_, tqdm(total=40000) as pbar: count = 0. 
last_timestamp = datetime.utcnow() for", "= datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _", "} } def run_http_request(req): '''Executes HTTP request to server and returns time Keyword-args:", "res = str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean) + '\\t' +", "= cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean) +", "re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting)", "f: # #Spawn pool of workers to execute http queries # pool =", "# with open(log_file) as f: # #Spawn pool of workers to execute http", "-1.: results.append(res) with open(log_file + '-test2', 'a') as out: for entry in results:", "range(11): response, exec_time = run_http_request(request_url) # if exec_time == -1.: # break query_times.append(exec_time)", "exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query =", "\"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as", "get_result_size(respJson) except: exec_time = -1 if exec_time != -1 and len(query_times) == 11:", "Pool() # results = pool.map_async(run_log, f,1) # pool.close() # while not results.ready(): #", "complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar: count", "datetime, time fallback_json = { \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\",", "Pool import re, sys, requests, random, json import time as timesleep import numpy", "time1 def cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args: query -- log-url", "url_[0] query_times = [] resp = '' result_size = 0 try: utcnow =", "pbar: count = 0. 
last_timestamp = datetime.utcnow() for l_ in in_: count +=", "def cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args: query -- log-url to", "line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$',", "time Keyword-args: req -- sparql query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890'", "= query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query]", "result_size = len(response['results']['bindings']) except: # respJson = fallback_json result_size = 0 return result_size", "'\\t' + str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return (res, last_timestamp_new) else:", "line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format =", "= fallback_json result_size = 0 return result_size def run_log(query_line, last_timestamp): # open queries", "readable sparql query Keyword-args: query -- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B',", "== 1: request_url = url_[0] query_times = [] resp = '' result_size =", "= np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean", "\"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [ ] }", "import time as timesleep import numpy as np from tqdm import * from", "log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+',", "'', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson", "0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) 
time_vec =", "entry in results.get(): if entry is not None: out.write(str(entry)) if __name__ == '__main__':", "- midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except: exec_time = -1 if", "midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11):", "time as timesleep import numpy as np from tqdm import * from urlparse", "from urlparse import urlparse, parse_qs import urllib from datetime import datetime, time fallback_json", "urlparse import urlparse, parse_qs import urllib from datetime import datetime, time fallback_json =", "= response.json() result_size = get_result_size(respJson) except: exec_time = -1 if exec_time != -1", "if exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query", "= url_[0] query_times = [] resp = '' result_size = 0 try: utcnow", "timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except: exec_time", "exec_time = run_http_request(request_url) # if exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1)", "main(): results = [] log_file = 'database.log' # with open(log_file) as f: #", "import * from urlparse import urlparse, parse_qs import urllib from datetime import datetime,", "return result_size def run_log(query_line, last_timestamp): # open queries and regex for links url_", "datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in", "def run_log(query_line, last_timestamp): # open queries and regex for links url_ = re.findall('\"GET", "in results.get(): if entry is not None: out.write(str(entry)) if __name__ == '__main__': main()", "queries # pool = Pool() # results = 
pool.map_async(run_log, f,1) # pool.close() #", "'&format=json' t0 = datetime.utcnow() # make call and measure time taken resp =", "+ str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return (res, last_timestamp_new) else: return", "= run_http_request(request_url) # if exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new", "from tqdm import * from urlparse import urlparse, parse_qs import urllib from datetime", "while not results.ready(): # remaining = results._number_left # print \"Waiting for\", remaining, \"tasks", "line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=',", "entry in results: # for entry in results.get(): if entry is not None:", "'a') as out: for entry in results: # for entry in results.get(): if", "\"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\":", "HTTP request to server and returns time Keyword-args: req -- sparql query in", "resp, time1 def cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args: query --", "results = pool.map_async(run_log, f,1) # pool.close() # while not results.ready(): # remaining =", "query Keyword-args: query -- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query)", "run_http_request(req): '''Executes HTTP request to server and returns time Keyword-args: req -- sparql", "result_size = 0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query =", "server and returns time Keyword-args: req -- sparql query in url formatting '''", "return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main():", "{ \"distinct\": False, \"ordered\": True, \"bindings\": [ ] } } def run_http_request(req): '''Executes", "time taken resp = 
requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1", "try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds()", "execute http queries # pool = Pool() # results = pool.map_async(run_log, f,1) #", "time fallback_json = { \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"]", "last_timestamp): # open queries and regex for links url_ = re.findall('\"GET (.*?) HTTP',", "\"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [ ] } }", "sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar: count = 0.", "import datetime, time fallback_json = { \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\",", "(-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results = [] log_file =", "of workers to execute http queries # pool = Pool() # results =", "query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean)", "'+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix", "numpy as np from tqdm import * from urlparse import urlparse, parse_qs import", "= Pool() # results = pool.map_async(run_log, f,1) # pool.close() # while not results.ready():", "[], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True,", "last_timestamp_new) def main(): results = [] log_file = 'database.log' # with open(log_file) as", "[\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [", "[] resp = '' result_size = 0 try: utcnow = datetime.utcnow() midnight_utc =", "make call and measure time 
taken resp = requests.get(url) time1 = (datetime.utcnow() -", "measure time taken resp = requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp,", "to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar:", "timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size", "= 'database.log' # with open(log_file) as f: # #Spawn pool of workers to", "return resp, time1 def cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args: query", "'', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format", "query_times = [] resp = '' result_size = 0 try: utcnow = datetime.utcnow()", "out: for entry in results: # for entry in results.get(): if entry is", "to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs)", "run_log(query_line, last_timestamp): # open queries and regex for links url_ = re.findall('\"GET (.*?)", "query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res", "\"distinct\": False, \"ordered\": True, \"bindings\": [ ] } } def run_http_request(req): '''Executes HTTP", "[] log_file = 'database.log' # with open(log_file) as f: # #Spawn pool of", "results._number_left # print \"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10)", "(.*?) 
HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url = url_[0]", "# remaining = results._number_left # print \"Waiting for\", remaining, \"tasks to complete...\" #", "== -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new", "time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans log-url into", "datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except:", "}, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [ ] } } def", "Keyword-args: req -- sparql query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' +", "np from tqdm import * from urlparse import urlparse, parse_qs import urllib from", "get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson = fallback_json result_size = 0", "req -- sparql query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req", "fallback_json = { \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] },", "== 11: #and result_size > 0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean", "cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args: query -- log-url to clean", "0. last_timestamp = datetime.utcnow() for l_ in in_: count += 1 res, last_timestamp", "> 0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec", "# sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar: count =", "\"ordered\": True, \"bindings\": [ ] } } def run_http_request(req): '''Executes HTTP request to", "count = 0. 
last_timestamp = datetime.utcnow() for l_ in in_: count += 1", "to server and returns time Keyword-args: req -- sparql query in url formatting", "-1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new -", "try: result_size = len(response['results']['bindings']) except: # respJson = fallback_json result_size = 0 return", "datetime.utcnow() for l_ in in_: count += 1 res, last_timestamp = run_log(l_, last_timestamp)", "utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for", "0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() -", "urlparse, parse_qs import urllib from datetime import datetime, time fallback_json = { \"head\":", "requests, random, json import time as timesleep import numpy as np from tqdm", "dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+", "return (-1., last_timestamp_new) def main(): results = [] log_file = 'database.log' # with", "# results = pool.map_async(run_log, f,1) # pool.close() # while not results.ready(): # remaining", "# break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds())", "= results._number_left # print \"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush() #", "sys, requests, random, json import time as timesleep import numpy as np from", "def get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson = fallback_json result_size =", "!= -1.: results.append(res) with open(log_file + '-test2', 'a') as out: for entry in", "exec_time = -1 if exec_time != -1 and len(query_times) == 
11: #and result_size", "np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean +", "= 0 return result_size def run_log(query_line, last_timestamp): # open queries and regex for", "warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res =", "len(response['results']['bindings']) except: # respJson = fallback_json result_size = 0 return result_size def run_log(query_line,", "count == 19: count = 0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res)", "if res != -1.: results.append(res) with open(log_file + '-test2', 'a') as out: for", "last_timestamp).total_seconds() for _ in range(11): response, exec_time = run_http_request(request_url) # if exec_time ==", "line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson =", "'', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size", "<reponame>derdav3/tf-sparql<filename>database_build/dbpedia_run_log_http.py<gh_stars>1-10 from multiprocessing import Pool import re, sys, requests, random, json import time", "re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes)", "pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with open(log_file + '-test2', 'a') as", "else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results = []", "result_size > 0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64)", "from datetime import datetime, time fallback_json = { \"head\": { \"link\": [], 
\"vars\":", "sparql query Keyword-args: query -- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+',", "0 return result_size def run_log(query_line, last_timestamp): # open queries and regex for links", "as out: for entry in results: # for entry in results.get(): if entry", "warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean =", "line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def", "response, exec_time = run_http_request(request_url) # if exec_time == -1.: # break query_times.append(exec_time) #", "cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean) + '\\t'", "for links url_ = re.findall('\"GET (.*?) HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_)", "= re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '',", "[timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) + '\\t'", "links url_ = re.findall('\"GET (.*?) HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) ==", "= { \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\":", "for entry in results: # for entry in results.get(): if entry is not", "re.findall('\"GET (.*?) 
HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url =", "last_timestamp = datetime.utcnow() for l_ in in_: count += 1 res, last_timestamp =", "= ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except: exec_time =", "= re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '',", "in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 =", "'' result_size = 0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query", "# respJson = fallback_json result_size = 0 return result_size def run_log(query_line, last_timestamp): #", "# while not results.ready(): # remaining = results._number_left # print \"Waiting for\", remaining,", "'\\n') return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def", "_ in range(11): response, exec_time = run_http_request(request_url) # if exec_time == -1.: #", "= re.findall('\"GET (.*?) 
HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url", "res != -1.: results.append(res) with open(log_file + '-test2', 'a') as out: for entry", "results: # for entry in results.get(): if entry is not None: out.write(str(entry)) if", "datetime.utcnow() if len(url_) == 1: request_url = url_[0] query_times = [] resp =", "query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix =", "'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 = datetime.utcnow() # make call and measure", "remaining = results._number_left # print \"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush()", "results = [] log_file = 'database.log' # with open(log_file) as f: # #Spawn", "return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results = [] log_file", "result_size = 0 return result_size def run_log(query_line, last_timestamp): # open queries and regex", "in_, tqdm(total=40000) as pbar: count = 0. 
last_timestamp = datetime.utcnow() for l_ in", "taken resp = requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1 def", "print \"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file)", "= requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans", "formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 = datetime.utcnow() #", "#Spawn pool of workers to execute http queries # pool = Pool() #", "and len(query_times) == 11: #and result_size > 0: cold_exec_time = query_times[0] warm_times =", "= datetime.utcnow() # make call and measure time taken resp = requests.get(url) time1", "multiprocessing import Pool import re, sys, requests, random, json import time as timesleep", "run_http_request(request_url) # if exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new =", "f,1) # pool.close() # while not results.ready(): # remaining = results._number_left # print", "if exec_time != -1 and len(query_times) == 11: #and result_size > 0: cold_exec_time", "line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii'))", "datetime.utcnow() # make call and measure time taken resp = requests.get(url) time1 =", "pool of workers to execute http queries # pool = Pool() # results", "= query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url)", "for l_ in in_: count += 1 res, last_timestamp = run_log(l_, last_timestamp) if", "and measure time taken resp = requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return", "re, sys, requests, random, json import time as timesleep import numpy as np", "urllib from datetime import 
datetime, time fallback_json = { \"head\": { \"link\": [],", "url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 = datetime.utcnow() # make call", "'\\t'+ str(time_vec) + '\\t' + str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' +", "+= 1 res, last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000: break if", "delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response, exec_time = run_http_request(request_url)", "if count == 19: count = 0 pbar.update(19) sys.stdout.flush() if res != -1.:", "respJson = fallback_json result_size = 0 return result_size def run_log(query_line, last_timestamp): # open", "request to server and returns time Keyword-args: req -- sparql query in url", "= re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '',", "resp = requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1 def cleanup_query(query):", "for _ in range(11): response, exec_time = run_http_request(request_url) # if exec_time == -1.:", "def main(): results = [] log_file = 'database.log' # with open(log_file) as f:", "= datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson)", "False, \"ordered\": True, \"bindings\": [ ] } } def run_http_request(req): '''Executes HTTP request", "str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return (res,", "from multiprocessing import Pool import re, sys, requests, random, json import time as", "as np from tqdm import * from urlparse import urlparse, parse_qs import urllib", "# pool.close() # while not results.ready(): # remaining = results._number_left # print \"Waiting", "import Pool import re, sys, requests, random, json import time as timesleep import", "= (datetime.utcnow() - t0).total_seconds() return resp, time1 def 
cleanup_query(query): '''Cleans log-url into readable", "# if exec_time == -1.: # break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow()", "= re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response):", "response.json() result_size = get_result_size(respJson) except: exec_time = -1 if exec_time != -1 and", "# for entry in results.get(): if entry is not None: out.write(str(entry)) if __name__", "* from urlparse import urlparse, parse_qs import urllib from datetime import datetime, time", "datetime import datetime, time fallback_json = { \"head\": { \"link\": [], \"vars\": [\"property\",", "\"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\":", "last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url = url_[0] query_times = []", "# make call and measure time taken resp = requests.get(url) time1 = (datetime.utcnow()", "} def run_http_request(req): '''Executes HTTP request to server and returns time Keyword-args: req", "returns time Keyword-args: req -- sparql query in url formatting ''' url =", "1 res, last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000: break if count", "req + '&format=json' t0 = datetime.utcnow() # make call and measure time taken", "= 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 = datetime.utcnow() # make call and", "query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson =", "'database.log' # with open(log_file) as f: # #Spawn pool of workers to execute", "len(results) > 40000: break if count == 19: count = 0 pbar.update(19) sys.stdout.flush()", "+ '\\t'+ str(time_vec) + '\\t' + str(warm_mean) 
+ '\\t' + str(cold_exec_time) + '\\t'", "re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '', line_noprefix)", "re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except: #", "break query_times.append(exec_time) # timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson", "parse_qs import urllib from datetime import datetime, time fallback_json = { \"head\": {", "= len(response['results']['bindings']) except: # respJson = fallback_json result_size = 0 return result_size def", "last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results =", "str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean) + '\\t' + str(cold_exec_time) +", "] } } def run_http_request(req): '''Executes HTTP request to server and returns time", "time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar: count = 0. 
last_timestamp =", "return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson = fallback_json", "1: request_url = url_[0] query_times = [] resp = '' result_size = 0", "= [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec) +", "= pool.map_async(run_log, f,1) # pool.close() # while not results.ready(): # remaining = results._number_left", "+ '\\t' + str(result_size) + '\\n') return (res, last_timestamp_new) else: return (-1., last_timestamp_new)", "line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size =", "str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return (res, last_timestamp_new) else: return (-1.,", "sparql query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json'", "line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes =", "+ '&format=json' t0 = datetime.utcnow() # make call and measure time taken resp", "'''Cleans log-url into readable sparql query Keyword-args: query -- log-url to clean '''", "midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except: exec_time = -1 if exec_time", "''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0 = datetime.utcnow() # make", "!= -1 and len(query_times) == 11: #and result_size > 0: cold_exec_time = query_times[0]", "-1 and len(query_times) == 11: #and result_size > 0: cold_exec_time = query_times[0] warm_times", "= [] resp = '' result_size = 0 try: utcnow = datetime.utcnow() midnight_utc", "+ req + '&format=json' t0 = datetime.utcnow() # make call and measure time", "http queries # pool = Pool() # results = 
pool.map_async(run_log, f,1) # pool.close()", "random, json import time as timesleep import numpy as np from tqdm import", "if len(url_) == 1: request_url = url_[0] query_times = [] resp = ''", "= 0. last_timestamp = datetime.utcnow() for l_ in in_: count += 1 res,", "(datetime.utcnow() - t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans log-url into readable sparql", "into readable sparql query Keyword-args: query -- log-url to clean ''' line_no_tabs =", "= 0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with open(log_file + '-test2',", "last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results = [] log_file = 'database.log'", "log_file = 'database.log' # with open(log_file) as f: # #Spawn pool of workers", "for entry in results.get(): if entry is not None: out.write(str(entry)) if __name__ ==", "run_log(l_, last_timestamp) if len(results) > 40000: break if count == 19: count =", "cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query,", "== 19: count = 0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with", "as f: # #Spawn pool of workers to execute http queries # pool", "\"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\":", "requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans log-url", "and regex for links url_ = re.findall('\"GET (.*?) HTTP', query_line) last_timestamp_new = datetime.utcnow()", "open queries and regex for links url_ = re.findall('\"GET (.*?) 
HTTP', query_line) last_timestamp_new", "'-test2', 'a') as out: for entry in results: # for entry in results.get():", "time_vec = [timestamp_query, delta_last_query] query_clean = cleanup_query(request_url) res = str(query_clean + '\\t'+ str(time_vec)", "= re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except:", "result_size def run_log(query_line, last_timestamp): # open queries and regex for links url_ =", "as timesleep import numpy as np from tqdm import * from urlparse import", "else: return (-1., last_timestamp_new) def main(): results = [] log_file = 'database.log' #", "19: count = 0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with open(log_file", "if len(results) > 40000: break if count == 19: count = 0 pbar.update(19)", "= datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response,", "as in_, tqdm(total=40000) as pbar: count = 0. 
last_timestamp = datetime.utcnow() for l_", "last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000: break if count == 19:", "pool.map_async(run_log, f,1) # pool.close() # while not results.ready(): # remaining = results._number_left #", "tqdm import * from urlparse import urlparse, parse_qs import urllib from datetime import", "respJson = response.json() result_size = get_result_size(respJson) except: exec_time = -1 if exec_time !=", "re.sub(r'\"', '', line_noprefix) line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try:", "# timesleep.sleep(random.random()*0.1) last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json()", "(datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response, exec_time = run_http_request(request_url) # if", "query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + '&format=json' t0", "remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000)", "time(0)) delta_last_query = (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response, exec_time =", "len(query_times) == 11: #and result_size > 0: cold_exec_time = query_times[0] warm_times = query_times[1:]", "[ ] } } def run_http_request(req): '''Executes HTTP request to server and returns", "line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D',", "# pool = Pool() # results = pool.map_async(run_log, f,1) # pool.close() # while", "import numpy as np from tqdm import * from urlparse import urlparse, parse_qs", "with open(log_file) as f: # #Spawn pool of workers to execute http queries", "= run_log(l_, last_timestamp) if len(results) > 40000: break if count == 19: count", "str(result_size) + '\\n') 
return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1.,", "'+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes", "0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with open(log_file + '-test2', 'a')", "''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting =", "import re, sys, requests, random, json import time as timesleep import numpy as", "queries and regex for links url_ = re.findall('\"GET (.*?) HTTP', query_line) last_timestamp_new =", "except: exec_time = -1 if exec_time != -1 and len(query_times) == 11: #and", "- t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans log-url into readable sparql query", "open(log_file) as f: # #Spawn pool of workers to execute http queries #", "sys.stdout.flush() if res != -1.: results.append(res) with open(log_file + '-test2', 'a') as out:", "\"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as", "and returns time Keyword-args: req -- sparql query in url formatting ''' url", "True, \"bindings\": [ ] } } def run_http_request(req): '''Executes HTTP request to server", "+ '\\t' + str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' + str(result_size) +", "import urllib from datetime import datetime, time fallback_json = { \"head\": { \"link\":", "= (datetime.utcnow() - last_timestamp).total_seconds() for _ in range(11): response, exec_time = run_http_request(request_url) #", "#and result_size > 0: cold_exec_time = query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times,", "t0).total_seconds() return resp, time1 def cleanup_query(query): '''Cleans log-url into readable sparql query Keyword-args:", "# open queries and regex for links url_ = re.findall('\"GET (.*?) 
HTTP', query_line)", "count += 1 res, last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000: break", "= 0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0)) delta_last_query = (datetime.utcnow()", "{ \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False,", "= datetime.utcnow() if len(url_) == 1: request_url = url_[0] query_times = [] resp", "query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url = url_[0] query_times =", "in in_: count += 1 res, last_timestamp = run_log(l_, last_timestamp) if len(results) >", "with open(log_file) as in_, tqdm(total=40000) as pbar: count = 0. last_timestamp = datetime.utcnow()", "'\\t' + str(result_size) + '\\n') return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else:", "= '' result_size = 0 try: utcnow = datetime.utcnow() midnight_utc = datetime.combine(utcnow.date(), time(0))", "str(time_vec) + '\\t' + str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' + str(result_size)", "url_ = re.findall('\"GET (.*?) 
HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1:", "last_timestamp_new = datetime.utcnow() timestamp_query = ((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size =", "def run_http_request(req): '''Executes HTTP request to server and returns time Keyword-args: req --", "HTTP', query_line) last_timestamp_new = datetime.utcnow() if len(url_) == 1: request_url = url_[0] query_times", "in range(11): response, exec_time = run_http_request(request_url) # if exec_time == -1.: # break", "line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"',", "except: # respJson = fallback_json result_size = 0 return result_size def run_log(query_line, last_timestamp):", "count = 0 pbar.update(19) sys.stdout.flush() if res != -1.: results.append(res) with open(log_file +", "# print \"Waiting for\", remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with", "= datetime.utcnow() for l_ in in_: count += 1 res, last_timestamp = run_log(l_,", "- last_timestamp).total_seconds() for _ in range(11): response, exec_time = run_http_request(request_url) # if exec_time", "results.ready(): # remaining = results._number_left # print \"Waiting for\", remaining, \"tasks to complete...\"", "not results.ready(): # remaining = results._number_left # print \"Waiting for\", remaining, \"tasks to", "= [] log_file = 'database.log' # with open(log_file) as f: # #Spawn pool", "results.append(res) with open(log_file + '-test2', 'a') as out: for entry in results: #", "result_size = get_result_size(respJson) except: exec_time = -1 if exec_time != -1 and len(query_times)", "Keyword-args: query -- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces", "call and measure time taken resp = requests.get(url) time1 = (datetime.utcnow() - t0).total_seconds()", "clean ''' line_no_tabs = re.sub(r'%09|%0B', 
'+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting", "+ '-test2', 'a') as out: for entry in results: # for entry in", "\"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [ ] } } def run_http_request(req):", "res, last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000: break if count ==", "'''Executes HTTP request to server and returns time Keyword-args: req -- sparql query", "re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++', '+', line_no_tabs) line_no_formatting = re.sub(r'%0A|%0D', '', line_single_spaces)", "((last_timestamp_new - midnight_utc).total_seconds()) respJson = response.json() result_size = get_result_size(respJson) except: exec_time = -1", "workers to execute http queries # pool = Pool() # results = pool.map_async(run_log,", "(-1., last_timestamp_new) def main(): results = [] log_file = 'database.log' # with open(log_file)", "= re.sub(r'%0A|%0D', '', line_single_spaces) line_noprefix = re.sub(r'.*query=', '', line_no_formatting) line_noquotes = re.sub(r'\"', '',", "-- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces = re.sub(r'\\++',", "request_url = url_[0] query_times = [] resp = '' result_size = 0 try:", "regex for links url_ = re.findall('\"GET (.*?) HTTP', query_line) last_timestamp_new = datetime.utcnow() if", "+ '\\n') return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new)", "# time.sleep(10) with open(log_file) as in_, tqdm(total=40000) as pbar: count = 0. 
last_timestamp", "with open(log_file + '-test2', 'a') as out: for entry in results: # for", "'\\t' + str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' + str(result_size) + '\\n')", "l_ in in_: count += 1 res, last_timestamp = run_log(l_, last_timestamp) if len(results)", "timesleep import numpy as np from tqdm import * from urlparse import urlparse,", "import urlparse, parse_qs import urllib from datetime import datetime, time fallback_json = {", "json import time as timesleep import numpy as np from tqdm import *", "= str(query_clean + '\\t'+ str(time_vec) + '\\t' + str(warm_mean) + '\\t' + str(cold_exec_time)", "to execute http queries # pool = Pool() # results = pool.map_async(run_log, f,1)", "+ str(warm_mean) + '\\t' + str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return", "pool.close() # while not results.ready(): # remaining = results._number_left # print \"Waiting for\",", "= get_result_size(respJson) except: exec_time = -1 if exec_time != -1 and len(query_times) ==", "+ str(result_size) + '\\n') return (res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return", "break if count == 19: count = 0 pbar.update(19) sys.stdout.flush() if res !=", "\"bindings\": [ ] } } def run_http_request(req): '''Executes HTTP request to server and", "exec_time != -1 and len(query_times) == 11: #and result_size > 0: cold_exec_time =", "for\", remaining, \"tasks to complete...\" # sys.stdout.flush() # time.sleep(10) with open(log_file) as in_,", "{ \"head\": { \"link\": [], \"vars\": [\"property\", \"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": {", "query -- log-url to clean ''' line_no_tabs = re.sub(r'%09|%0B', '+', query) line_single_spaces =", "\"propertyLabel\", \"propertyVal\", \"propertyValLabel\"] }, \"results\": { \"distinct\": False, \"ordered\": True, \"bindings\": [ ]", "# #Spawn pool of workers to execute http queries # pool = Pool()", "url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req + 
'&format=json' t0 = datetime.utcnow()", "(res, last_timestamp_new) else: return (-1., last_timestamp_new) else: return (-1., last_timestamp_new) def main(): results", "urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings']) except: # respJson = fallback_json result_size", "40000: break if count == 19: count = 0 pbar.update(19) sys.stdout.flush() if res", "-- sparql query in url formatting ''' url = 'http://claudio11.ifi.uzh.ch:8890' + req +", "tqdm(total=40000) as pbar: count = 0. last_timestamp = datetime.utcnow() for l_ in in_:", "pool = Pool() # results = pool.map_async(run_log, f,1) # pool.close() # while not", "> 40000: break if count == 19: count = 0 pbar.update(19) sys.stdout.flush() if", "t0 = datetime.utcnow() # make call and measure time taken resp = requests.get(url)", "query_times[0] warm_times = query_times[1:] warm_mean = np.mean(warm_times, dtype=np.float64) time_vec = [timestamp_query, delta_last_query] query_clean", "open(log_file + '-test2', 'a') as out: for entry in results: # for entry", "in_: count += 1 res, last_timestamp = run_log(l_, last_timestamp) if len(results) > 40000:", "line_end_format = re.sub(r'(&.*?)$', '', line_noquotes) return urllib.unquote_plus(line_end_format.encode('ascii')) def get_result_size(response): try: result_size = len(response['results']['bindings'])", "+ '\\t' + str(cold_exec_time) + '\\t' + str(result_size) + '\\n') return (res, last_timestamp_new)" ]
[ "\"Web interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL", "install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\",", "import setup, find_packages NAME = \"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari", "\"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\"", "\"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[", "\"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development", "\"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5 -", "\"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"],", "{ \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"], extras_require={", "\"Development Status :: 5 - Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming", "\"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status ::", "include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", 
\"toml\",", "\"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating System ::", "\"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ],", "setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\",", "for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\"", ":: OSI Approved :: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={", ":: POSIX :: Linux\", \"Programming Language :: Python :: 3\", \"Programming Language ::", "\"License :: OSI Approved :: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True,", "POSIX :: Linux\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python", "setup, find_packages NAME = \"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari workflow\"", "= DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR,", ":: 5 - Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming Language ::", "\"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating", "description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\",", "find_packages NAME = 
\"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari workflow\" )", "AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"},", "(\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"], extras_require={ \"sphinx\": [\"sphinxcontrib-apidoc\"]", ":: Linux\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python ::", "= ( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR =", "\"Programming Language :: Python :: 3.6\", \"License :: OSI Approved :: MIT License\",", "Python :: 3.6\", \"License :: OSI Approved :: MIT License\", \"Framework :: Flask\",", "AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"),", "Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\")", "use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\",", "package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\",", ":: Python :: 3.6\", \"License :: OSI Approved :: MIT License\", \"Framework ::", "classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating System :: POSIX :: Linux\",", ":: 3\", \"Programming Language :: Python :: 3.6\", \"License :: OSI Approved ::", "\"<NAME>\" AUTHOR_EMAIL = 
\"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\":", "\"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status", ") LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION,", "DESCRIPTION = ( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR", "packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\",", "Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup(", "\"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\",", "Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming Language :: Python :: 3\",", "workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME,", "Language :: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"License ::", "\"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"License", "NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"], extras_require={ \"sphinx\": [\"sphinxcontrib-apidoc\"] }", "\"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", 
\"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\",", "\"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5", "from setuptools import setup, find_packages NAME = \"passari_web_ui\" DESCRIPTION = ( \"Web interface", "= \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[", "\"bcrypt\", \"Flask-SQLAlchemy\", \"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5 - Production/Stable\",", "\"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\", \"rq-dashboard>=0.6\", \"toml\", \"bcrypt\", \"Flask-SQLAlchemy\",", "], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating System :: POSIX ::", ":: 3.6\", \"License :: OSI Approved :: MIT License\", \"Framework :: Flask\", ],", "author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\", \"rq>=1\",", "DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL,", "\"Operating System :: POSIX :: Linux\", \"Programming Language :: Python :: 3\", \"Programming", "3.6\", \"License :: OSI Approved :: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\",", "= \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, 
author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True,", "Linux\", \"Programming Language :: Python :: 3\", \"Programming Language :: Python :: 3.6\",", "\"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\":", "OSI Approved :: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\":", "NAME = \"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION", "- Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming Language :: Python ::", "python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } },", "command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\",", "\"Flask-WTF\", \"flask-talisman\", \"arrow\" ], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating System", ":: Python :: 3\", \"Programming Language :: Python :: 3.6\", \"License :: OSI", "MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\",", "interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL =", "], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") }", "\"arrow\" ], classifiers=[ \"Development Status :: 5 - Production/Stable\", \"Operating System :: POSIX", "setuptools import setup, find_packages NAME = \"passari_web_ui\" DESCRIPTION = ( \"Web interface for", ":: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, 
command_options={ \"build_sphinx\": { \"project\":", "3\", \"Programming Language :: Python :: 3.6\", \"License :: OSI Approved :: MIT", "System :: POSIX :: Linux\", \"Programming Language :: Python :: 3\", \"Programming Language", "Language :: Python :: 3.6\", \"License :: OSI Approved :: MIT License\", \"Framework", "long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\",", "Status :: 5 - Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming Language", "5 - Production/Stable\", \"Operating System :: POSIX :: Linux\", \"Programming Language :: Python", "License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME),", "Python :: 3\", \"Programming Language :: Python :: 3.6\", \"License :: OSI Approved", "Approved :: MIT License\", \"Framework :: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, command_options={ \"build_sphinx\": {", "LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\" AUTHOR_EMAIL = \"<EMAIL>\" setup( name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION,", "\"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"], extras_require={ \"sphinx\":", "( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION AUTHOR = \"<NAME>\"", "author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\", \"click<8\", \"SQLAlchemy\", \"psycopg2\",", ":: Flask\", ], python_requires=\">=3.6\", use_scm_version=True, 
command_options={ \"build_sphinx\": { \"project\": (\"setup.py\", NAME), \"source_dir\": (\"setup.py\",", "name=NAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, author=AUTHOR, author_email=AUTHOR_EMAIL, packages=find_packages(\"src\"), include_package_data=True, package_dir={\"passari_web_ui\": \"src/passari_web_ui\"}, install_requires=[ \"Flask\", \"Flask-Security-Too\", \"click>=7\",", "\"source_dir\": (\"setup.py\", \"docs\") } }, setup_requires=[\"setuptools_scm\", \"sphinx\", \"sphinxcontrib-apidoc\"], extras_require={ \"sphinx\": [\"sphinxcontrib-apidoc\"] } )", "\"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION = DESCRIPTION", "= \"passari_web_ui\" DESCRIPTION = ( \"Web interface for Passari workflow\" ) LONG_DESCRIPTION =" ]
[ "MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links =", "f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return", "= [] pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data)", "data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class", "<reponame>Gusyatnikova/argument-mining-rus import os import pickle import nltk as nltk from nltk.classify import SklearnClassifier", "from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB", "from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager", "import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links", "return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name)", "self.divided_args = [] self.divided_links = [] pass def set_data(self, data): args = DataManager().filter_labels(data,", "= DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def", "links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file =", "'links_sklearn.pickle') pass def 
set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier =", "self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier", "pass def set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links)", "f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def", "with open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words)", "as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import", "= nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier,", "@staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename,", "import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification:", "os import pickle import nltk as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model", "nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes 
import MultinomialNB,", "self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier", "__init__(self): self.divided_args = [] self.divided_links = [] pass def set_data(self, data): args =", "pickle import nltk as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression", "'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass", "import nltk as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from", "= 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier, f)", "save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as", "get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir,", "def __init__(self): self.divided_args = [] self.divided_links = [] pass def set_data(self, data): args", "train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def", "'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args = []", 
"getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links):", "sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels", "train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def", "import os import pickle import nltk as nltk from nltk.classify import SklearnClassifier from", "set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle')", "links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier =", "DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args)", "SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle')", "as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys()", "['Support', 'Attacks'] class Classification: def __init__(self): 
self.divided_args = [] self.divided_links = [] pass", "self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier", "Classification: def __init__(self): self.divided_args = [] self.divided_links = [] pass def set_data(self, data):", "def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self,", "self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename", "nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier", "def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir", "= nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle')", "set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle')", "classifier = 
SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self,", "open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features", "= DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier =", "self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set)", "train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename):", "classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args)", "= SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set,", "filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: 
pickle.dump(classifier, f) @staticmethod def", "data): args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links =", "import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels =", "links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\")", "features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier,", "= SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set)", "'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod", "nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links):", "'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier =", "= SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 
'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier,", "self.divided_links = [] pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links =", "DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod", "SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links):", "correct_links = ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args = [] self.divided_links =", "= DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier,", "import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager", "links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args", "pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links)", "classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args, train_links): classifier = 
SklearnClassifier(LogisticRegression()).train(train_args)", "load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier = pickle.load(classifier_file, encoding=\"latin1\") classifier_file.close() return classifier", "def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier,", "args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links)", "classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self,", "self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier = pickle.load(classifier_file,", "= SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self, train_args,", "from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels = ['Premise', 'Claim',", "= ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args", "DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def 
get_divided_args(self): return self.divided_args def get_divided_links(self):", "'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier =", "pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content =", "BernoulliNB from data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support',", "'Attacks'] class Classification: def __init__(self): self.divided_args = [] self.divided_links = [] pass def", "DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name):", "@staticmethod def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self,", "nltk as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes", "class Classification: def __init__(self): self.divided_args = [] self.divided_links = [] pass def set_data(self,", "'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set)", "train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass", "set_data(self, data): args = 
DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links", "LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels = ['Premise',", "from data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks']", "correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def __init__(self):", "import pickle import nltk as nltk from nltk.classify import SklearnClassifier from sklearn.linear_model import", "self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename,", "= nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args,", "SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import", "nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle')", "self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return", "pass def 
train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def", "pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args =", "content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args, train_links): classifier", "train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass", "def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb')", "def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features def set_naivebayes_classifier(self, train_args,", "self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set,", "Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier", "train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') 
classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def", "def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args)", "def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links) Classification().save_pickle(classifier,", "= DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def", "pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features = nltk.FreqDist(content).keys() return features", "def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier = pickle.load(classifier_file, encoding=\"latin1\") classifier_file.close() return", "pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f:", "DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args = DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self):", "= DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links", "= ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args = [] self.divided_links = []", "set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) 
self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle')", "SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set,", "Classification().save_pickle(classifier, 'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier", "'links_naivebayes.pickle') pass def set_sklearn_classifier(self, train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier =", "pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier = pickle.load(classifier_file, encoding=\"latin1\") classifier_file.close()", "DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def", "sklearn.naive_bayes import MultinomialNB, BernoulliNB from data_manager import DataManager correct_labels = ['Premise', 'Claim', 'MajorClaim']", "nltk.classify import SklearnClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB, BernoulliNB from", "classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) 
self.set_sklearn_classifier(arguments_training_set,", "'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content = DataManager().get_content(words) features =", "= os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words):", "['Premise', 'Claim', 'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args =", "pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier,", "def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename =", "= [] self.divided_links = [] pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels)", "[] pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links = DataManager().filter_links(data) self.divided_args", "'MajorClaim'] correct_links = ['Support', 'Attacks'] class Classification: def __init__(self): self.divided_args = [] self.divided_links", "[] self.divided_links = [] pass def set_data(self, data): args = DataManager().filter_labels(data, correct_labels) links", "def set_logisticregression_classifier(self, train_args, train_links): classifier = SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier,", "self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files' filename = os.path.join(pickle_dir, pickle_name) with", "os.path.join(pickle_dir, pickle_name) with open(filename, 'wb') as f: pickle.dump(classifier, f) @staticmethod def getFeatures(words): content", "correct_labels) links = DataManager().filter_links(data) self.divided_args = 
DataManager().divide_sentences(args) self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return", "SklearnClassifier(LogisticRegression()).train(train_args) self.save_pickle(classifier, 'args_logisticregression.pickle') classifier = SklearnClassifier(LogisticRegression()).train(train_links) self.save_pickle(classifier, 'links_logisticregression.pickle') pass def train_classifiers(self, arguments_training_set, links_training_set):", "arguments_training_set, links_training_set): self.set_naivebayes_classifier(arguments_training_set, links_training_set) self.set_sklearn_classifier(arguments_training_set, links_training_set) self.set_logisticregression_classifier(arguments_training_set, links_training_set) pass def load_classifier(self, filename): classifier_file", "links_training_set) pass def load_classifier(self, filename): classifier_file = open('pickle_files/'+filename, \"rb\") classifier = pickle.load(classifier_file, encoding=\"latin1\")", "features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier = nltk.NaiveBayesClassifier.train(train_links)", "get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir =", "return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def save_pickle(classifier, pickle_name): pickle_dir = 'pickle_files'", "train_args, train_links): classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass", "return features def set_naivebayes_classifier(self, train_args, train_links): classifier = nltk.NaiveBayesClassifier.train(train_args) 
Classification().save_pickle(classifier, 'args_naivebayes.pickle') classifier =", "classifier = SklearnClassifier(MultinomialNB()).train(train_args) self.save_pickle(classifier, 'args_sklearn.pickle') classifier = SklearnClassifier(MultinomialNB()).train(train_links) self.save_pickle(classifier, 'links_sklearn.pickle') pass def set_logisticregression_classifier(self,", "self.divided_links = DataManager().divide_sentences(links) def get_divided_args(self): return self.divided_args def get_divided_links(self): return self.divided_links @staticmethod def" ]
[ "import views app_name=\"blog\" #Works as namespace urlpatterns = [ path('', views.blogs, name=\"blog\"), path('<int:blog_id>',", "#Works as namespace urlpatterns = [ path('', views.blogs, name=\"blog\"), path('<int:blog_id>', views.detail, name=\"detail\") ]", "from . import views app_name=\"blog\" #Works as namespace urlpatterns = [ path('', views.blogs,", "django.urls import path from . import views app_name=\"blog\" #Works as namespace urlpatterns =", "path from . import views app_name=\"blog\" #Works as namespace urlpatterns = [ path('',", "from django.urls import path from . import views app_name=\"blog\" #Works as namespace urlpatterns", "app_name=\"blog\" #Works as namespace urlpatterns = [ path('', views.blogs, name=\"blog\"), path('<int:blog_id>', views.detail, name=\"detail\")", "views app_name=\"blog\" #Works as namespace urlpatterns = [ path('', views.blogs, name=\"blog\"), path('<int:blog_id>', views.detail,", "import path from . import views app_name=\"blog\" #Works as namespace urlpatterns = [", ". import views app_name=\"blog\" #Works as namespace urlpatterns = [ path('', views.blogs, name=\"blog\")," ]
[ "In edit mode taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) #", "not None: # In edit mode taskIndex = new_list.index(editTask) # Get list position", "'editBox', 'onChange': handleChange, 'value': newTask } ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems,", "Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status']", "Update name else: # In add mode new_list.append({'name': newTask, 'status': False}) # Add", "in taskList] def updateCount(): if taskFilter == 'open': new_list = [task for task", "not task['status']] elif taskFilter == 'closed': new_list = [task for task in taskList", "handleChange, 'value': newTask } ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, None) ),", "'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ), ) else: return", "[task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter])", "task['status'] } ), ) else: return None def ListItems(): return [el(ListItem, {'key': task['name'],", "a copy taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) # Update", "{'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all',", "'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'},", "setTitle, useEffect, useState, render, createElement as el def App(): newTask, setNewTask = useState(\"\")", "else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list = list(taskList) # Make", "# Add new item setTaskList(new_list) # Update our state setNewTask(\"\") # Clear the", "task in taskList if task['status']] else: new_list = [task for task in 
taskList]", "{'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter ==", "target['checked']}) # Update setTaskList(new_list) # Update our state def ListItem(props): task = props['task']", "createElement as el def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None)", "{'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed',", "mode new_list.append({'name': newTask, 'status': False}) # Add new item setTaskList(new_list) # Update our", "'editBox'}, \"Edit Task: \" if editTask is not None else \"Add Task: \"", "handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All", "setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter", "task): target = event['target'] new_list = list(taskList) # Make a copy taskIndex =", "setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount", "\" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange,", "mode taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) # Update name", "# Make a copy if editTask is not None: # In edit mode", "App(): newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList = useState([])", "# Clear the edit item value def handleEdit(task): setNewTask(task['name']) # Set the new", "else \"Add Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask }", "ListItem(props): task = props['task'] if taskFilter == \"all\" or \\ (taskFilter == \"open\"", "(taskFilter == \"closed\" and task['status']): return el('li', None, task['name'] + \" \", el('button',", "new_list = 
[task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount,", "\"Add Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask } ),", "'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange':", "# Update our state def ListItem(props): task = props['task'] if taskFilter == \"all\"", "def handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make a copy if editTask is", "and not task['status']) or \\ (taskFilter == \"closed\" and task['status']): return el('li', None,", "Clear the new item value setEditTask(None) # Clear the edit item value def", "= useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make a copy if", "'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) },", "new item value setEditTask(task) # Set the edit item value def handleDelete(task): new_list", "useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list", "= [task for task in taskList if task['status']] else: new_list = [task for", "handleDelete(task): new_list = list(taskList) # Make a copy new_list.remove(task) # Remove the specified", "el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value':", "add mode new_list.append({'name': newTask, 'status': False}) # Add new item setTaskList(new_list) # Update", "el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value':", "new_list = list(taskList) # Make a copy if editTask is not None: #", "), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange': lambda", "'onChange': handleChange, 
'checked': taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"),", "), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, None) ), ) render(App, None, 'root')", "== \"closed\" and task['status']): return el('li', None, task['name'] + \" \", el('button', {'type':", "in taskList if not task['status']] elif taskFilter == 'closed': new_list = [task for", "Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask } ), el('input',", "'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' } ), ), el('label', {'htmlFor': 'editBox'},", "the specified item setTaskList(new_list) # Update our state def handleChange(event): target = event['target']", "Make a copy taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) #", "the edit item value def handleDelete(task): new_list = list(taskList) # Make a copy", "None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input',", "{'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda:", "\" if editTask is not None else \"Add Task: \" ), el('input', {'id':", "new_list[taskIndex].update({'name': newTask}) # Update name else: # In add mode new_list.append({'name': newTask, 'status':", "taskFilter == 'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if", "lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\"", "new_list = list(taskList) # Make a copy taskIndex = new_list.index(task) # Get list", "a copy new_list.remove(task) # Remove the specified item setTaskList(new_list) # Update our state", "# In edit mode taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask})", "'value': newTask } ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, 
None) ), )", "position new_list[taskIndex].update({'name': newTask}) # Update name else: # In add mode new_list.append({'name': newTask,", "{'type': 'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ),", "\\ (taskFilter == \"open\" and not task['status']) or \\ (taskFilter == \"closed\" and", "useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None,", "in taskList if task['status']] else: new_list = [task for task in taskList] setTaskCount(len(new_list))", "'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task),", "'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all' } ), el('label', {'htmlFor': 'open'},", "el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \"", "= props['task'] if taskFilter == \"all\" or \\ (taskFilter == \"open\" and not", "= useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount =", "'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange':", "= useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault()", "\"all\" or \\ (taskFilter == \"open\" and not task['status']) or \\ (taskFilter ==", "else: new_list = [task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), [])", "of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio',", "event['target'] new_list = list(taskList) # Make a copy taskIndex = new_list.index(task) # Get", "el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 
'status', 'onChange': lambda e:", "), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is not None else", "# Update setTaskList(new_list) # Update our state def ListItem(props): task = props['task'] if", "not None else \"Add Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value':", "\\ (taskFilter == \"closed\" and task['status']): return el('li', None, task['name'] + \" \",", "taskFilter == \"all\" or \\ (taskFilter == \"open\" and not task['status']) or \\", "{'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter ==", "== 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list =", "setEditTask(task) # Set the edit item value def handleDelete(task): new_list = list(taskList) #", "copy new_list.remove(task) # Remove the specified item setTaskList(new_list) # Update our state def", "f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type':", "), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'},", "\" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type':", "task in taskList if not task['status']] elif taskFilter == 'closed': new_list = [task", "value setEditTask(task) # Set the edit item value def handleDelete(task): new_list = list(taskList)", "new_list = list(taskList) # Make a copy new_list.remove(task) # Remove the specified item", "Get list position new_list[taskIndex].update({'name': newTask}) # Update name else: # In add mode", "'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all' }", "# Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our state", "editTask, 
setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter,", "item value setEditTask(None) # Clear the edit item value def handleEdit(task): setNewTask(task['name']) #", "+ \" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button',", "}, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label',", "not task['status']) or \\ (taskFilter == \"closed\" and task['status']): return el('li', None, task['name']", "'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' } ), ),", "editTask is not None else \"Add Task: \" ), el('input', {'id': 'editBox', 'onChange':", "setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make a copy", "), ) else: return None def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task})", "new_list = [task for task in taskList if task['status']] else: new_list = [task", "handleChangeStatus(event, task): target = event['target'] new_list = list(taskList) # Make a copy taskIndex", "new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) # Update name else: # In", "if editTask is not None: # In edit mode taskIndex = new_list.index(editTask) #", "state def ListItem(props): task = props['task'] if taskFilter == \"all\" or \\ (taskFilter", "taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make a", "\" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange,", "), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask } ), el('input', {'type': 'submit'}),", "taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return 
el('form', {'onSubmit': handleSubmit},", "handleChange, 'checked': taskFilter == 'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task:", "'all', 'onChange': handleChange, 'checked': taskFilter == 'all' } ), el('label', {'htmlFor': 'open'}, \"", "edit item value def handleDelete(task): new_list = list(taskList) # Make a copy new_list.remove(task)", "el('li', None, task['name'] + \" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) },", "'checked': task['status'] } ), ) else: return None def ListItems(): return [el(ListItem, {'key':", "setEditTask(None) # Clear the edit item value def handleEdit(task): setNewTask(task['name']) # Set the", "None: # In edit mode taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name':", "\" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask } ), el('input', {'type':", "def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList] def", "List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of", "if taskFilter == \"all\" or \\ (taskFilter == \"open\" and not task['status']) or", "In add mode new_list.append({'name': newTask, 'status': False}) # Add new item setTaskList(new_list) #", "} ), ) else: return None def ListItems(): return [el(ListItem, {'key': task['name'], 'task':", "# Make a copy taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']})", "(taskFilter == \"open\" and not task['status']) or \\ (taskFilter == \"closed\" and task['status']):", "== 'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name':", "# Update name else: # In add mode new_list.append({'name': newTask, 'status': False}) #", "{'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'},", "} ), 
el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id':", "updateCount(): if taskFilter == 'open': new_list = [task for task in taskList if", "el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick':", "taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) #", "\"Edit Task: \" if editTask is not None else \"Add Task: \" ),", "def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList =", "setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList)", "# In add mode new_list.append({'name': newTask, 'status': False}) # Add new item setTaskList(new_list)", "pyreact import setTitle, useEffect, useState, render, createElement as el def App(): newTask, setNewTask", "== \"open\" and not task['status']) or \\ (taskFilter == \"closed\" and task['status']): return", "event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target =", "= list(taskList) # Make a copy taskIndex = new_list.index(task) # Get list position", "is not None else \"Add Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange,", "handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ),", "newTask } ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, None) ), ) render(App,", "target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list", "Update our state def handleChange(event): target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value'])", 
"the new item value setEditTask(None) # Clear the edit item value def handleEdit(task):", "event.preventDefault() new_list = list(taskList) # Make a copy if editTask is not None:", "'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter',", "[el(ListItem, {'key': task['name'], 'task': task}) for task in taskList] def updateCount(): if taskFilter", "render, createElement as el def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask =", "'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed'", "= [task for task in taskList if not task['status']] elif taskFilter == 'closed':", "{'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"),", "handleChangeStatus(e, task), 'checked': task['status'] } ), ) else: return None def ListItems(): return", "list(taskList) # Make a copy taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status':", "= event['target'] new_list = list(taskList) # Make a copy taskIndex = new_list.index(task) #", "handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id':", "'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input',", "setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div',", "task in taskList] def updateCount(): if taskFilter == 'open': new_list = [task for", "== 'closed': new_list = [task for task in taskList if task['status']] else: new_list", "edit mode taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) # Update", "== 'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if 
editTask", "# Set the edit item value def handleDelete(task): new_list = list(taskList) # Make", "taskList if task['status']] else: new_list = [task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda:", "taskFilter == 'open': new_list = [task for task in taskList if not task['status']]", "{'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open',", "taskList] def updateCount(): if taskFilter == 'open': new_list = [task for task in", "copy if editTask is not None: # In edit mode taskIndex = new_list.index(editTask)", "'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is", "'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter',", "new_list = [task for task in taskList if not task['status']] elif taskFilter ==", "from pyreact import setTitle, useEffect, useState, render, createElement as el def App(): newTask,", "# Make a copy new_list.remove(task) # Remove the specified item setTaskList(new_list) # Update", "useState, render, createElement as el def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask", "for task in taskList] def updateCount(): if taskFilter == 'open': new_list = [task", "= list(taskList) # Make a copy if editTask is not None: # In", "Set the edit item value def handleDelete(task): new_list = list(taskList) # Make a", "new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our", "target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task):", "setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list = list(taskList) # Make a", "Update our state setNewTask(\"\") # Clear the new item value setEditTask(None) # Clear", "[taskList, taskFilter]) return 
el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div',", "'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' } ), ), el('label',", "props['task'] if taskFilter == \"all\" or \\ (taskFilter == \"open\" and not task['status'])", "handleEdit(task): setNewTask(task['name']) # Set the new item value setEditTask(task) # Set the edit", "# Remove the specified item setTaskList(new_list) # Update our state def handleChange(event): target", "for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return", "'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ), ) else: return None", "{'key': task['name'], 'task': task}) for task in taskList] def updateCount(): if taskFilter ==", "\"open\" and not task['status']) or \\ (taskFilter == \"closed\" and task['status']): return el('li',", "if taskFilter == 'open': new_list = [task for task in taskList if not", "lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox',", "= useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter =", "Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name':", "[task for task in taskList if task['status']] else: new_list = [task for task", "Update setTaskList(new_list) # Update our state def ListItem(props): task = props['task'] if taskFilter", "the edit item value def handleEdit(task): setNewTask(task['name']) # Set the new item value", "value def handleDelete(task): new_list = list(taskList) # Make a copy new_list.remove(task) # Remove", ") else: return None def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for", "useState(\"all\") def handleSubmit(event): event.preventDefault() new_list 
= list(taskList) # Make a copy if editTask", "taskFilter == 'closed': new_list = [task for task in taskList if task['status']] else:", "el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id':", "def handleChangeStatus(event, task): target = event['target'] new_list = list(taskList) # Make a copy", "handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make a copy if editTask is not", "if editTask is not None else \"Add Task: \" ), el('input', {'id': 'editBox',", "= useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList) #", "} ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is not", "editTask is not None: # In edit mode taskIndex = new_list.index(editTask) # Get", "target = event['target'] new_list = list(taskList) # Make a copy taskIndex = new_list.index(task)", "def handleChange(event): target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def", "specified item setTaskList(new_list) # Update our state def handleChange(event): target = event['target'] if", "setNewTask(\"\") # Clear the new item value setEditTask(None) # Clear the edit item", "None def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList]", "else: # In add mode new_list.append({'name': newTask, 'status': False}) # Add new item", "taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def", "Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our state def", "setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event):", "# Clear the new item value setEditTask(None) # Clear the edit item value", "# Update our 
state def handleChange(event): target = event['target'] if target['name'] == 'taskFilter':", "= list(taskList) # Make a copy new_list.remove(task) # Remove the specified item setTaskList(new_list)", "new_list.remove(task) # Remove the specified item setTaskList(new_list) # Update our state def handleChange(event):", "e: handleChangeStatus(e, task), 'checked': task['status'] } ), ) else: return None def ListItems():", "None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all',", "'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all'", "task['name'], 'task': task}) for task in taskList] def updateCount(): if taskFilter == 'open':", "setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number", "lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ), ) else: return None def", "state def handleChange(event): target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value'])", "'onChange': handleChange, 'checked': taskFilter == 'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"),", "name else: # In add mode new_list.append({'name': newTask, 'status': False}) # Add new", "new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our state def ListItem(props): task =", "Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked':", "'onChange': handleChange, 'checked': taskFilter == 'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit", "value def handleEdit(task): setNewTask(task['name']) # Set the new item value setEditTask(task) # Set", "edit item value def handleEdit(task): setNewTask(task['name']) # Set the new item value setEditTask(task)", "Make a copy if 
editTask is not None: # In edit mode taskIndex", "{'id': 'editBox', 'onChange': handleChange, 'value': newTask } ), el('input', {'type': 'submit'}), el('ol', None,", "taskList if not task['status']] elif taskFilter == 'closed': new_list = [task for task", "new item setTaskList(new_list) # Update our state setNewTask(\"\") # Clear the new item", "or \\ (taskFilter == \"closed\" and task['status']): return el('li', None, task['name'] + \"", "list position new_list[taskIndex].update({'name': newTask}) # Update name else: # In add mode new_list.append({'name':", "taskFilter == 'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio',", "el('input', {'type': 'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] }", "list(taskList) # Make a copy new_list.remove(task) # Remove the specified item setTaskList(new_list) #", "newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount,", "'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open' } ), el('label',", "newTask, 'status': False}) # Add new item setTaskList(new_list) # Update our state setNewTask(\"\")", "handleChange, 'checked': taskFilter == 'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input',", "item setTaskList(new_list) # Update our state def handleChange(event): target = event['target'] if target['name']", "'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open' } ),", "for task in taskList if task['status']] else: new_list = [task for task in", "def handleDelete(task): new_list = list(taskList) # Make a copy new_list.remove(task) # Remove the", "task), 'checked': task['status'] } ), ) else: return None def ListItems(): return [el(ListItem,", "'status': False}) # Add new item setTaskList(new_list) # Update our state setNewTask(\"\") #", "# Update our state setNewTask(\"\") 
# Clear the new item value setEditTask(None) #", "None, task['name'] + \" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\"", "def ListItem(props): task = props['task'] if taskFilter == \"all\" or \\ (taskFilter ==", "in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit':", "'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ), )", "} ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, None) ), ) render(App, None,", "useEffect, useState, render, createElement as el def App(): newTask, setNewTask = useState(\"\") editTask,", "useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0)", "item setTaskList(new_list) # Update our state setNewTask(\"\") # Clear the new item value", "taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None,", "Task: \" if editTask is not None else \"Add Task: \" ), el('input',", "and task['status']): return el('li', None, task['name'] + \" \", el('button', {'type': 'button', 'onClick':", "Clear the edit item value def handleEdit(task): setNewTask(task['name']) # Set the new item", "for task in taskList if not task['status']] elif taskFilter == 'closed': new_list =", "el def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList, setTaskList", "new item value setEditTask(None) # Clear the edit item value def handleEdit(task): setNewTask(task['name'])", "useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list = list(taskList) # Make", "'open', 'onChange': handleChange, 'checked': taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'}, \"", "a copy if editTask is not None: # In edit mode 
taskIndex =", "), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open',", "= new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update", "list(taskList) # Make a copy if editTask is not None: # In edit", "'checked': taskFilter == 'closed' } ), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \"", "setNewTask(task['name']) # Set the new item value setEditTask(task) # Set the edit item", "== 'open': new_list = [task for task in taskList if not task['status']] elif", "= event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target", "task['status']): return el('li', None, task['name'] + \" \", el('button', {'type': 'button', 'onClick': lambda:", "= [task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList,", "useState(None) taskList, setTaskList = useState([]) taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\")", "'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all' } ),", "task}) for task in taskList] def updateCount(): if taskFilter == 'open': new_list =", "'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open' }", "'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' } ), ), el('label', {'htmlFor':", "\" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked':", "'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all' } ), el('label', {'htmlFor':", "'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter == 'all' } ), el('label',", "task = props['task'] if taskFilter == \"all\" or 
\\ (taskFilter == \"open\" and", "el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter", "item value def handleEdit(task): setNewTask(task['name']) # Set the new item value setEditTask(task) #", "ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList] def updateCount():", "new_list.append({'name': newTask, 'status': False}) # Add new item setTaskList(new_list) # Update our state", "= new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) # Update name else: #", "value setEditTask(None) # Clear the edit item value def handleEdit(task): setNewTask(task['name']) # Set", "import setTitle, useEffect, useState, render, createElement as el def App(): newTask, setNewTask =", "el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is not None else \"Add", "task['status']] else: new_list = [task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"),", "item value def handleDelete(task): new_list = list(taskList) # Make a copy new_list.remove(task) #", "False}) # Add new item setTaskList(new_list) # Update our state setNewTask(\"\") # Clear", "task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo List\"), []) useEffect(updateCount, [taskList, taskFilter]) return el('form',", "[]) useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks:", "None else \"Add Task: \" ), el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask", "} ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id':", "Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked':", "{'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange': 
lambda e: handleChangeStatus(e,", "handleChange(event): target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event,", "as el def App(): newTask, setNewTask = useState(\"\") editTask, setEditTask = useState(None) taskList,", "# Get list position new_list[taskIndex].update({'name': newTask}) # Update name else: # In add", "elif taskFilter == 'closed': new_list = [task for task in taskList if task['status']]", "return el('li', None, task['name'] + \" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task)", "setTaskList(new_list) # Update our state def ListItem(props): task = props['task'] if taskFilter ==", "\", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button',", "useEffect(updateCount, [taskList, taskFilter]) return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"),", "'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' }", "our state setNewTask(\"\") # Clear the new item value setEditTask(None) # Clear the", "task['status']) or \\ (taskFilter == \"closed\" and task['status']): return el('li', None, task['name'] +", "'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list = list(taskList)", "el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter", "el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor':", "{'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 'onChange': handleChange, 'checked': taskFilter ==", "el('input', {'id': 'editBox', 'onChange': handleChange, 'value': newTask } ), el('input', {'type': 'submit'}), el('ol',", 
"}, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status',", "def updateCount(): if taskFilter == 'open': new_list = [task for task in taskList", "return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList] def updateCount(): if", "'checked': taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type':", "def handleEdit(task): setNewTask(task['name']) # Set the new item value setEditTask(task) # Set the", "'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open'", "), ), el('label', {'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is not None", "'closed': new_list = [task for task in taskList if task['status']] else: new_list =", "{'htmlFor': 'editBox'}, \"Edit Task: \" if editTask is not None else \"Add Task:", "item value setEditTask(task) # Set the edit item value def handleDelete(task): new_list =", "[task for task in taskList if not task['status']] elif taskFilter == 'closed': new_list", "'open'}, \" Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange':", "return None def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for task in", "'id': 'status', 'onChange': lambda e: handleChangeStatus(e, task), 'checked': task['status'] } ), ) else:", "'open', 'value': 'open', 'onChange': handleChange, 'checked': taskFilter == 'open' } ), el('label', {'htmlFor':", "handleChange, 'checked': taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input',", "state setNewTask(\"\") # Clear the new item value setEditTask(None) # Clear the edit", "\"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type': 'checkbox', 'id': 'status', 'onChange':", "else: return None def ListItems(): return [el(ListItem, {'key': task['name'], 'task': task}) for task", "'onClick': lambda: 
handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor': 'status'}, \" Completed:\"), el('input', {'type':", "setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target'] new_list = list(taskList) #", "== \"all\" or \\ (taskFilter == \"open\" and not task['status']) or \\ (taskFilter", "el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter", "Set the new item value setEditTask(task) # Set the edit item value def", "Update our state def ListItem(props): task = props['task'] if taskFilter == \"all\" or", "taskCount, setTaskCount = useState(0) taskFilter, setTaskFilter = useState(\"all\") def handleSubmit(event): event.preventDefault() new_list =", "our state def handleChange(event): target = event['target'] if target['name'] == 'taskFilter': setTaskFilter(target['value']) else:", "el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed', 'value':", "# Set the new item value setEditTask(task) # Set the edit item value", "\"closed\" and task['status']): return el('li', None, task['name'] + \" \", el('button', {'type': 'button',", "taskFilter == 'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio',", "taskIndex = new_list.index(editTask) # Get list position new_list[taskIndex].update({'name': newTask}) # Update name else:", "if target['name'] == 'taskFilter': setTaskFilter(target['value']) else: setNewTask(target['value']) def handleChangeStatus(event, task): target = event['target']", "\"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task) }, \"Edit\" ), el('label', {'htmlFor':", "task['name'] + \" \", el('button', {'type': 'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ),", "\"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'all', 'value': 'all', 
'onChange': handleChange,", "'open': new_list = [task for task in taskList if not task['status']] elif taskFilter", "our state def ListItem(props): task = props['task'] if taskFilter == \"all\" or \\", "copy taskIndex = new_list.index(task) # Get list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list)", "Active:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'open', 'value': 'open', 'onChange': handleChange, 'checked':", "is not None: # In edit mode taskIndex = new_list.index(editTask) # Get list", "list position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our state def ListItem(props):", "or \\ (taskFilter == \"open\" and not task['status']) or \\ (taskFilter == \"closed\"", "'task': task}) for task in taskList] def updateCount(): if taskFilter == 'open': new_list", "el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"),", "if not task['status']] elif taskFilter == 'closed': new_list = [task for task in", "== 'open' } ), el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name':", "'checked': taskFilter == 'all' } ), el('label', {'htmlFor': 'open'}, \" Active:\"), el('input', {'type':", "the new item value setEditTask(task) # Set the edit item value def handleDelete(task):", "return el('form', {'onSubmit': handleSubmit}, el('div', None, f\"Number of Tasks: {taskCount}\"), el('div', None, el('label',", "newTask}) # Update name else: # In add mode new_list.append({'name': newTask, 'status': False})", "setTaskList(new_list) # Update our state def handleChange(event): target = event['target'] if target['name'] ==", "setTaskList(new_list) # Update our state setNewTask(\"\") # Clear the new item value setEditTask(None)", "{taskCount}\"), el('div', None, el('label', {'htmlFor': 'all'}, \"All Tasks:\"), el('input', {'type': 'radio', 'name': 'taskFilter',", "), 
el('label', {'htmlFor': 'closed'}, \" Completed:\"), el('input', {'type': 'radio', 'name': 'taskFilter', 'id': 'closed',", "position new_list[taskIndex].update({'status': target['checked']}) # Update setTaskList(new_list) # Update our state def ListItem(props): task", "'button', 'onClick': lambda: handleDelete(task) }, \"Delete\" ), el('button', {'type': 'button', 'onClick': lambda: handleEdit(task)", "if task['status']] else: new_list = [task for task in taskList] setTaskCount(len(new_list)) useEffect(lambda: setTitle(\"ToDo", "'onChange': handleChange, 'value': newTask } ), el('input', {'type': 'submit'}), el('ol', None, el(ListItems, None)", "Remove the specified item setTaskList(new_list) # Update our state def handleChange(event): target =", "task['status']] elif taskFilter == 'closed': new_list = [task for task in taskList if", "Add new item setTaskList(new_list) # Update our state setNewTask(\"\") # Clear the new", "Make a copy new_list.remove(task) # Remove the specified item setTaskList(new_list) # Update our", "'taskFilter', 'id': 'closed', 'value': 'closed', 'onChange': handleChange, 'checked': taskFilter == 'closed' } )," ]
[ "Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7',", "Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language", "import test as TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args =", "Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python", "tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme = reader.read()", "setuptools import setup from setuptools.command.test import test as TestCommand import appstore class Tox(TestCommand):", "2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5',", "'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming", "from io import open from setuptools import setup from setuptools.command.test import test as", "Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language :: English',", "'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural", ":: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language ::", "finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import tox errno", "Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python", "from setuptools.command.test import test as TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self)", "'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python ::", ":: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python ::", "Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language", ":: 2.7', 'Programming Language :: Python :: 3', 
'Programming Language :: Python ::", "def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import tox", "import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True", "description='App Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0',", "Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python", "2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3',", "license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1 -", "setup from setuptools.command.test import test as TestCommand import appstore class Tox(TestCommand): def finalize_options(self):", "Planning', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License',", ":: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python', 'Programming", "'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation ::", "Apache Software License', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language", "-- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'],", "English', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language", "Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language", "3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',", ":: Python :: 2.7', 'Programming Language :: Python :: 3', 
'Programming Language ::", "appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def", "TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite =", "cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience ::", "Status :: 1 - Planning', 'Intended Audience :: Developers', 'License :: OSI Approved", "TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import tox errno =", "errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup(", "with open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store", ":: Developers', 'License :: OSI Approved :: Apache Software License', 'Natural Language ::", "as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end", "'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming", "Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language :: Python',", "'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming", "class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self):", "tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience", "'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language ::", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming", "from setuptools 
import setup from setuptools.command.test import test as TestCommand import appstore class", "reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for", "user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test':", "'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming", "name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/',", "= tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup( name='appstore',", "import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme =", "1 - Planning', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache", "Tox}, install_requires=[], classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience :: Developers',", "for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[],", "Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python", "run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme", "test as TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = []", ":: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language ::", "io import open from 
setuptools import setup from setuptools.command.test import test as TestCommand", "readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for pip.',", "encoding='utf-8') as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented", ":: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python ::", "Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python", "Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language", "self.test_args = [] self.test_suite = True def run_tests(self): import tox errno = tox.cmdline(self.test_args)", "= True def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8')", "- Planning', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software", "import setup from setuptools.command.test import test as TestCommand import appstore class Tox(TestCommand): def", "'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'License :: OSI", "Software License', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language ::", "Language :: English', 'Programming Language :: Python', 'Programming Language :: Python :: 2',", "[] self.test_suite = True def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with", ":: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status ::", "front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', 
packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox},", "pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=(", "install_requires=[], classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'License", "long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development", ":: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', ),", "= reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for pip.', long_description=readme,", ":: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python ::", "version=appstore.__version__, description='App Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache", "Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython',", "classifiers=( 'Development Status :: 1 - Planning', 'Intended Audience :: Developers', 'License ::", "Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language", "License', 'Natural Language :: English', 'Programming Language :: Python', 'Programming Language :: Python", "Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import", "OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language ::", "url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', 
packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1", "Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language", "self.test_suite = True def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst',", "setuptools.command.test import test as TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args", "True def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as", "3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6',", "tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__,", ":: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python ::", "Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'],", "Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', ), )", "import open from setuptools import setup from setuptools.command.test import test as TestCommand import", "def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno) with open('README.rst', encoding='utf-8') as reader:", "Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python", ":: 1 - Planning', 'Intended Audience :: Developers', 'License :: OSI Approved ::", "packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1 - Planning', 'Intended", "'License :: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming", "setup( 
name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>', author_email='<EMAIL>',", "2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status :: 1 - Planning',", "= [] self.test_suite = True def run_tests(self): import tox errno = tox.cmdline(self.test_args) exit(errno)", ":: OSI Approved :: Apache Software License', 'Natural Language :: English', 'Programming Language", "exit(errno) with open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App", "author='<NAME>', author_email='<EMAIL>', url='http://www.grantjenks.com/docs/appstore/', license='Apache 2.0', packages=['appstore'], tests_require=['tox'], cmdclass={'test': Tox}, install_requires=[], classifiers=( 'Development Status", ":: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python ::", "open from setuptools import setup from setuptools.command.test import test as TestCommand import appstore", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "reader.read() setup( name='appstore', version=appstore.__version__, description='App Store -- user-oriented front-end for pip.', long_description=readme, author='<NAME>',", "3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation", "as TestCommand import appstore class Tox(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite", "open('README.rst', encoding='utf-8') as reader: readme = reader.read() setup( name='appstore', version=appstore.__version__, description='App Store --", ":: English', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming" ]
[ "ours ### START CODE HERE ### (approx. 6 lines of code) W1 =", "W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters", "= tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END", "\"cost\", the feedict should contain a minibatch for (X,Y). ### START CODE HERE", "for (X,Y). ### START CODE HERE ### (1 line) _, minibatch_cost = sess.run([optimizer,", "### (1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ###", "\"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are given in initialize_parameters Returns: Z3", "'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1, only", "Build the forward propagation in the tensorflow graph ### START CODE HERE ###", "minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph", "4 # x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) # y", "b1 : [25, 1] W2 : [12, 25] b2 : [12, 1] W3", "of shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y", "minibatch_size) # number of minibatches of size minibatch_size in the train set seed", "# Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1,", "training set, of shape (input size = 12288, number of training examples =", "forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function", "Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y)", "distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try", "epoch % 5 == 0: costs.append(epoch_cost) # lets save the parameters in a", "size costs = [] # To keep track of the cost # Create", "\"\"\" ops.reset_default_graph() # to be able to rerun the model without 
overwriting tf", "(1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END", "(1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation:", "epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) #", "the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test", "Arguments: X -- input dataset placeholder, of shape (input size, number of examples)", "random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X,", "{X: minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) #", "the feedict should contain a minibatch for (X,Y). ### START CODE HERE ###", "with tensorflow. The shapes are: W1 : [25, 12288] b1 : [25, 1]", "1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {\"W1\": W1, \"b1\": b1,", "\"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements the forward", "Add cost function to tensorflow graph ### START CODE HERE ### (1 line)", "that runs the graph on a minibatch. 
# Run the session to execute", "### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward", "number of epochs of the optimization loop minibatch_size -- size of a minibatch", "execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for", "tf sess = tf.Session() from keras import backend as K K.set_session(sess) # this", "# to keep consistent results (n_x, m) = X_train.shape # (n_x: input size,", "of minibatches of size minibatch_size in the train set seed = seed +", "variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep", "keras.utils.data_utils import get_file # from keras import backend as K from keras.layers import", "learning rate of the optimization num_epochs -- number of epochs of the optimization", "CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ###", "keras.layers import Flatten, Dense, Input # from keras.utils.data_utils import get_file # from keras", "= tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 = np.random.rand(batchsize, dim) # x2", "if print_cost == True and epoch % 100 == 0: print(\"Cost after epoch", "number of training examples = 1080) Y_train -- test set, of shape (output", "loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build a", "tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build a neural network", "CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ###", "def initialize_parameters(): \"\"\" Initializes parameters to build a neural network with tensorflow. 
The", "dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are", "print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ = loss(x1_, x2_, y_)", "b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements the forward propagation for the", "the parameters from the dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2", "### (approx. 5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1)", "### START CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE", "the parameters in a variable parameters = sess.run(parameters) print(\"Parameters have been trained!\") #", "keep track of the cost # Create Placeholders of shape (n_x, n_y) ###", "HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the cost every epoch", "and the \"cost\", the feedict should contain a minibatch for (X,Y). ### START", "12288] b1 : [25, 1] W2 : [12, 25] b2 : [12, 1]", "b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3", "the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU", "is 1, only match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1", "after epoch %i: %f\" % (epoch, epoch_cost)) if print_cost == True and epoch", "START CODE HERE ### (approx. 
6 lines of code) W1 = tf.get_variable(\"W1\", [25,", "in a variable parameters = sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the", "### START CODE HERE ### (1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X:", "number of examples in the train set) n_y = Y_train.shape[0] # n_y :", "(epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost)", "-> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset", "you can try margin parameters margin = tf.constant(1.) labels = tf.to_float(y) match_loss =", "three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input", "CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ###", "consistent results (n_x, m) = X_train.shape # (n_x: input size, m : number", "CODE HERE ### # Cost function: Add cost function to tensorflow graph ###", "parameters -- parameters learnt by the model. They can then be used to", "that your \"random\" numbers match ours ### START CODE HERE ### (approx. 
6", "tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init =", "% (epoch, epoch_cost)) if print_cost == True and epoch % 5 == 0:", "import Dense from tensorflow.python.framework import ops import tensorflow as tf sess = tf.Session()", "labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term')", "END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() #", "of shape (output size = 6, number of test examples = 120) learning_rate", "keras import backend as K K.set_session(sess) # this placeholder will contain our input", "= tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {\"W1\":", "= random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch", "= loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1", "size, m : number of examples in the train set) n_y = Y_train.shape[0]", "the cost every epoch if print_cost == True and epoch % 100 ==", "# Do the training loop for epoch in range(num_epochs): epoch_cost = 0. 
#", "parameters in a variable parameters = sess.run(parameters) print(\"Parameters have been trained!\") # Calculate", "shape (output size = 6, number of training examples = 1080) X_test --", "[25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12,", "import Flatten, Dense, Input # from keras.utils.data_utils import get_file # from keras import", "all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training loop with sess.as_default(): optimizer.run(feed_dict={x1_:x1,", "minibatches of size minibatch_size in the train set seed = seed + 1", "given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit", "X_test -- training set, of shape (input size = 12288, number of training", "6, number of test examples = 120) learning_rate -- learning rate of the", "input dataset placeholder, of shape (input size, number of examples) parameters -- python", "= tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 A2 =", "of training examples = 120) Y_test -- test set, of shape (output size", "Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 A2", "Initialize all the variables init = tf.global_variables_initializer() # Start the session to compute", "trained!\") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy", ": [25, 1] W2 : [12, 25] b2 : [12, 1] W3 :", "examples = 120) Y_test -- test set, of shape (output size = 6,", ": [12, 25] b2 : [12, 1] W3 : [6, 12] b3 :", "(output size = 6, number of training examples = 1080) X_test -- training", "shape (output size = 6, number of test examples = 120) learning_rate --", "END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the cost", "== 0: print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost)) if print_cost ==", "AdamOptimizer. 
### START CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END", "= tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE", "parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the", "epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related to an", "# x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # # l =", "the cost every 100 epochs Returns: parameters -- parameters learnt by the model.", "on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y:", "== True and epoch % 5 == 0: costs.append(epoch_cost) # lets save the", "network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288,", "CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the cost every", "of examples) parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\",", "can try margin parameters margin = tf.constant(1.) 
labels = tf.to_float(y) match_loss = tf.square(l2diff,", "are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR", "forward propagation in the tensorflow graph ### START CODE HERE ### (1 line)", "propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR", "model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow", "1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1],", "# Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that", "# lets save the parameters in a variable parameters = sess.run(parameters) print(\"Parameters have", "Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can", "Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs):", "Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2)", "END CODE HERE ### # Cost function: Add cost function to tensorflow graph", "-> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X --", "feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer()", "HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters", "Returns: parameters -- parameters learnt by the model. They can then be used", "# from keras import backend as K from keras.layers import Dense from tensorflow.python.framework", "= tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is", "\"random\" numbers match ours ### START CODE HERE ### (approx. 
6 lines of", "compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init)", "relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2", "\"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))", "(n_x, n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x,", "so that your \"random\" numbers match ours ### START CODE HERE ### (approx.", "### # Initialize all the variables init = tf.global_variables_initializer() # Start the session", "print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # lets save", "session to compute the tensorflow graph with tf.Session() as sess: # Run the", "can then be used to predict. \"\"\" ops.reset_default_graph() # to be able to", "tf.square(l2diff)), 'mismatch_term') # if label is 1, only match_loss will count, otherwise mismatch_loss", "shapes are: W1 : [25, 12288] b1 : [25, 1] W2 : [12,", "### END CODE HERE ### return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,", "= 0. 
# Defines a cost related to an epoch num_minibatches = int(m", "tensors containing W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that", "(input size = 12288, number of training examples = 1080) Y_train -- test", "\"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements the forward propagation for", "cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE ###", "input digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2,", "= 120) learning_rate -- learning rate of the optimization num_epochs -- number of", "loss(x1, x2, y): # Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)),", "training examples = 1080) X_test -- training set, of shape (input size =", "compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer.", "overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 #", "the session to compute the tensorflow graph with tf.Session() as sess: # Run", "seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch", "X_test, Y: Y_test})) return parameters loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_)", "3 # to keep consistent results (n_x, m) = X_train.shape # (n_x: input", "[25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12,", "b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1", "network with tensorflow. 
The shapes are: W1 : [25, 12288] b1 : [25,", "= tf.Session() from keras import backend as K K.set_session(sess) # this placeholder will", "\"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2']", "of training examples = 1080) X_test -- training set, of shape (input size", "HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph", "# sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize all variables", "A1), b2) # Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) #", "Y_train -- test set, of shape (output size = 6, number of training", "training examples = 120) Y_test -- test set, of shape (output size =", "W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25] b2", "(approx. 6 lines of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 =", "= sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the correct predictions correct_prediction =", "shapes are given in initialize_parameters Returns: Z3 -- the output of the last", "= tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 =", "backend as K K.set_session(sess) # this placeholder will contain our input digits, as", "y): # Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) #", "W2 : [12, 25] b2 : [12, 1] W3 : [6, 12] b3", "%i: %f\" % (epoch, epoch_cost)) if print_cost == True and epoch % 5", "the variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow", "from tensorflow.python.framework import ops import tensorflow as tf sess = tf.Session() from keras", "output of the last LINEAR unit \"\"\" # Retrieve the parameters from the", "# Cost function: Add cost function to tensorflow graph ### START CODE HERE", "120) learning_rate 
-- learning rate of the optimization num_epochs -- number of epochs", "line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize", "mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to", "of size minibatch_size in the train set seed = seed + 1 minibatches", "if label is 1, only match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels,", "print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return", "as tf sess = tf.Session() from keras import backend as K K.set_session(sess) #", "\"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set,", "= X_train.shape # (n_x: input size, m : number of examples in the", "Initializes parameters to build a neural network with tensorflow. The shapes are: W1", "### (approx. 
6 lines of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1", "# Initialize all the variables init = tf.global_variables_initializer() # Start the session to", "1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1],", "tf variables tf.set_random_seed(1) # to keep consistent results seed = 3 # to", "feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) #", "tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def", "CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE", "to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent", "b2, \"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements the", "Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X:", "= tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 =", "in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit \"\"\"", "Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters", "Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END", "### return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\"", "X -- input dataset placeholder, of shape (input size, number of examples) parameters", "b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that your \"random\" numbers", "### END CODE HERE ### # Cost function: Add 
cost function to tensorflow", "match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss),", "relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ###", "will contain our input digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096))", "120) Y_test -- test set, of shape (output size = 6, number of", "unit \"\"\" # Retrieve the parameters from the dictionary \"parameters\" W1 = parameters['W1']", "batchsize = 4 # x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim)", "and epoch % 5 == 0: costs.append(epoch_cost) # lets save the parameters in", "from keras.layers import Flatten, Dense, Input # from keras.utils.data_utils import get_file # from", "the cost # Create Placeholders of shape (n_x, n_y) ### START CODE HERE", "tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\",", "# batchsize = 4 # x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize,", "= np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2", "tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))", "= 1080) X_test -- training set, of shape (input size = 12288, number", "initialize_parameters(): \"\"\" Initializes parameters to build a neural network with tensorflow. 
The shapes", "tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1, only match_loss will count, otherwise", "+ 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: #", "n_y) ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y)", "of shape (input size = 12288, number of training examples = 1080) Y_train", "from keras.layers import Dense from tensorflow.python.framework import ops import tensorflow as tf sess", "to predict. \"\"\" ops.reset_default_graph() # to be able to rerun the model without", "### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an", "HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### #", "initialize_parameters Returns: Z3 -- the output of the last LINEAR unit \"\"\" #", "Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)", "n_y : output size costs = [] # To keep track of the", "= 3 # to keep consistent results (n_x, m) = X_train.shape # (n_x:", "with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training", "# Defines a cost related to an epoch num_minibatches = int(m / minibatch_size)", "the \"cost\", the feedict should contain a minibatch for (X,Y). ### START CODE", "CODE HERE ### (1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y:", "the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX", "placeholder, of shape (input size, number of examples) parameters -- python dictionary containing", "the forward propagation in the tensorflow graph ### START CODE HERE ### (1", "### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents: Z1 =", "# (n_x: input size, m : number of examples in the train set)", "# so that your \"random\" numbers match ours ### START CODE HERE ###", "HERE ### (approx. 
6 lines of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))", "contain our input digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def", "1080) X_test -- training set, of shape (input size = 12288, number of", "parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 5 lines) #", "minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a", "def forward_propagation(X, parameters): \"\"\" Implements the forward propagation for the model: LINEAR ->", "minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print", "set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for", "the graph on a minibatch. # Run the session to execute the \"optimizer\"", "minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training loop", "in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The", "epoch %i: %f\" % (epoch, epoch_cost)) if print_cost == True and epoch %", "tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin parameters margin = tf.constant(1.) labels", "\"b2\", \"W3\", \"b3\" the shapes are given in initialize_parameters Returns: Z3 -- the", "minibatch print_cost -- True to print the cost every 100 epochs Returns: parameters", "W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3", "Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1", "tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin parameters margin = tf.constant(1.)", "= {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3}", "number of examples) parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\",", "model. They can then be used to predict. 
\"\"\" ops.reset_default_graph() # to be", "HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ###", "the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) #", "# A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2,", "set, of shape (output size = 6, number of training examples = 1080)", "1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2,", "to keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m", "parameters loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4", "set, of shape (input size = 12288, number of training examples = 120)", "rate of the optimization num_epochs -- number of epochs of the optimization loop", "### START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ###", "Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add", "### START CODE HERE ### (approx. 6 lines of code) W1 = tf.get_variable(\"W1\",", "tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\",", "variables init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph", "/ minibatch_size) # number of minibatches of size minibatch_size in the train set", "from keras.utils.data_utils import get_file # from keras import backend as K from keras.layers", "seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch", "the \"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y).", "dictionary of tensors containing W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) #", "reduction_indices=1)) # you can try margin parameters margin = tf.constant(1.) 
labels = tf.to_float(y)", "minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train --", "the training loop for epoch in range(num_epochs): epoch_cost = 0. # Defines a", "(input size, number of examples) parameters -- python dictionary containing your parameters \"W1\",", "a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape", "runs the graph on a minibatch. # Run the session to execute the", "/ num_minibatches # Print the cost every epoch if print_cost == True and", "of shape (input size = 12288, number of training examples = 120) Y_test", "l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X,", "neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size =", "Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs", "the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))", "np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3 def model(X_train, Y_train,", "-- learning rate of the optimization num_epochs -- number of epochs of the", "tensorflow. 
The shapes are: W1 : [25, 12288] b1 : [25, 1] W2", "size = 12288, number of training examples = 1080) Y_train -- test set,", "tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as", "Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ###", "examples = 1080) X_test -- training set, of shape (input size = 12288,", "0: print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost)) if print_cost == True", "= tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session()", "are: W1 : [25, 12288] b1 : [25, 1] W2 : [12, 25]", "b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing W1,", "= np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost),", "of shape (output size = 6, number of training examples = 1080) X_test", "tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters)", "100 epochs Returns: parameters -- parameters learnt by the model. They can then", "variable parameters = sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the correct predictions", "= tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build a neural", "\"\"\" tf.set_random_seed(1) # so that your \"random\" numbers match ours ### START CODE", "= relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) +", "X_train.shape # (n_x: input size, m : number of examples in the train", "n_y = Y_train.shape[0] # n_y : output size costs = [] # To", "parameters['b3'] ### START CODE HERE ### (approx. 
5 lines) # Numpy Equivalents: Z1", ": output size costs = [] # To keep track of the cost", "dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary", "parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3", "lets save the parameters in a variable parameters = sess.run(parameters) print(\"Parameters have been", "tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 =", "'mismatch_term') # if label is 1, only match_loss will count, otherwise mismatch_loss loss", "HERE ### parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\":", "\"W2\", \"b2\", \"W3\", \"b3\" the shapes are given in initialize_parameters Returns: Z3 --", "loop for epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related", "test set, of shape (output size = 6, number of test examples =", "a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the", "minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a", "to build a neural network with tensorflow. The shapes are: W1 : [25,", "number of training examples = 1080) X_test -- training set, of shape (input", "your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are given in", "tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): # Euclidean distance between x1,x2 l2diff", "keep consistent results seed = 3 # to keep consistent results (n_x, m)", "our input digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1,", "= compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow", "on a minibatch. # Run the session to execute the \"optimizer\" and the", "W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 
5", "the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size,", "shape (input size, number of examples) parameters -- python dictionary containing your parameters", "\"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters def", "1080) Y_train -- test set, of shape (output size = 6, number of", "vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): # Euclidean distance", "get_file # from keras import backend as K from keras.layers import Dense from", "Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural", "consistent results seed = 3 # to keep consistent results (n_x, m) =", "X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters", "set) n_y = Y_train.shape[0] # n_y : output size costs = [] #", "seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches:", "numbers match ours ### START CODE HERE ### (approx. 6 lines of code)", "= seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in", "### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. 
### START CODE", "25] b2 : [12, 1] W3 : [6, 12] b3 : [6, 1]", "Y: Y_test})) return parameters loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) #", "np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict", "W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ###", "-- input dataset placeholder, of shape (input size, number of examples) parameters --", "-- a dictionary of tensors containing W1, b1, W2, b2, W3, b3 \"\"\"", "RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input", "of the optimization loop minibatch_size -- size of a minibatch print_cost -- True", "m : number of examples in the train set) n_y = Y_train.shape[0] #", "test set, of shape (output size = 6, number of training examples =", "12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25],", "the tensorflow optimizer. Use an AdamOptimizer. 
### START CODE HERE ### (1 line)", "X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2,", "cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the", "### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE", "= 12288, number of training examples = 120) Y_test -- test set, of", "results (n_x, m) = X_train.shape # (n_x: input size, m : number of", "# Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1", "tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y:", "shape=(1, 4096)) def loss(x1, x2, y): # Euclidean distance between x1,x2 l2diff =", "\"\"\" Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR", "function to tensorflow graph ### START CODE HERE ### (1 line) cost =", "CODE HERE ### (approx. 5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X,", "= parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 =", "(n_x, m) = X_train.shape # (n_x: input size, m : number of examples", "set, of shape (output size = 6, number of test examples = 120)", "Model # from keras.layers import Flatten, Dense, Input # from keras.utils.data_utils import get_file", "set, of shape (input size = 12288, number of training examples = 1080)", "the optimization loop minibatch_size -- size of a minibatch print_cost -- True to", "epoch if print_cost == True and epoch % 100 == 0: print(\"Cost after", "Input # from keras.utils.data_utils import get_file # from keras import backend as K", "# l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X:", "### epoch_cost += minibatch_cost / num_minibatches # Print the cost every epoch if", "build a neural network with tensorflow. 
The shapes are: W1 : [25, 12288]", "shape (n_x, n_y) ### START CODE HERE ### (1 line) X, Y =", "# from keras.layers import Flatten, Dense, Input # from keras.utils.data_utils import get_file #", "tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer", "to keep consistent results seed = 3 # to keep consistent results (n_x,", "session to execute the \"optimizer\" and the \"cost\", the feedict should contain a", "= tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction,", "= tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) + b1 A1", ": number of examples in the train set) n_y = Y_train.shape[0] # n_y", "# n_y : output size costs = [] # To keep track of", "START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE", "size minibatch_size in the train set seed = seed + 1 minibatches =", "True and epoch % 100 == 0: print(\"Cost after epoch %i: %f\" %", "number of minibatches of size minibatch_size in the train set seed = seed", "'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build", "1, only match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 -", "feedict should contain a minibatch for (X,Y). 
### START CODE HERE ### (1", "The shapes are: W1 : [25, 12288] b1 : [25, 1] W2 :", "# Run the session to execute the \"optimizer\" and the \"cost\", the feedict", "HERE ### # Cost function: Add cost function to tensorflow graph ### START", "line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the", "output size costs = [] # To keep track of the cost #", "parameters def forward_propagation(X, parameters): \"\"\" Implements the forward propagation for the model: LINEAR", "# Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 =", "= tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') #", "minibatch_size -- size of a minibatch print_cost -- True to print the cost", "HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START", "# number of minibatches of size minibatch_size in the train set seed =", "of training examples = 1080) Y_train -- test set, of shape (output size", "accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ = loss(x1_, x2_, y_) optimizer =", "6 lines of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\",", "= 12288, number of training examples = 1080) Y_train -- test set, of", "= np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3 def model(X_train,", "\"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements", "tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1,", "tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\",", "print_cost -- True to print the cost every 100 epochs Returns: 
parameters --", "parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3']", "tensorflow as tf sess = tf.Session() from keras import backend as K K.set_session(sess)", "related to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches", "the dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2", "Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line) X,", "# Print the cost every epoch if print_cost == True and epoch %", "and epoch % 100 == 0: print(\"Cost after epoch %i: %f\" % (epoch,", "minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line", "[12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6,", "[12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters --", "to execute the \"optimizer\" and the \"cost\", the feedict should contain a minibatch", "print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost)) if print_cost == True and", "examples = 120) learning_rate -- learning rate of the optimization num_epochs -- number", "HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start the", "minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) =", "START CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END", "W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3", "= tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1, only match_loss will", "lines of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25,", "init_op = tf.global_variables_initializer() sess.run(init_op) # Run 
training loop with sess.as_default(): optimizer.run(feed_dict={x1_:x1, x2_:x2, y_:y})", "-- size of a minibatch print_cost -- True to print the cost every", "the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X,", "tf.argmax(Y)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train", "have been trained!\") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) #", "correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy =", "for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR ->", "== 0: costs.append(epoch_cost) # lets save the parameters in a variable parameters =", "tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1, only match_loss will count,", "b3 ### END CODE HERE ### return Z3 def model(X_train, Y_train, X_test, Y_test,", "minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT:", "keras.models import Model # from keras.layers import Flatten, Dense, Input # from keras.utils.data_utils", "sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3),", "learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments:", "def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer", "= tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables init", "minibatch for (X,Y). 
### START CODE HERE ### (1 line) _, minibatch_cost =", "print_cost == True and epoch % 100 == 0: print(\"Cost after epoch %i:", "costs.append(epoch_cost) # lets save the parameters in a variable parameters = sess.run(parameters) print(\"Parameters", "of the cost # Create Placeholders of shape (n_x, n_y) ### START CODE", "dim) # x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # # l", "as K from keras.layers import Dense from tensorflow.python.framework import ops import tensorflow as", "contain a minibatch for (X,Y). ### START CODE HERE ### (1 line) _,", "(n_x: input size, m : number of examples in the train set) n_y", "Dense from tensorflow.python.framework import ops import tensorflow as tf sess = tf.Session() from", "def loss(x1, x2, y): # Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1,", "line that runs the graph on a minibatch. # Run the session to", "Arguments: X_train -- training set, of shape (input size = 12288, number of", "keep consistent results (n_x, m) = X_train.shape # (n_x: input size, m :", "(1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation:", "(input size = 12288, number of training examples = 120) Y_test -- test", "tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 =", "# Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training loop with", "(1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ### #", "tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) + b1 A1 =", "(X,Y). 
### START CODE HERE ### (1 line) _, minibatch_cost = sess.run([optimizer, cost],", "x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize", "# to be able to rerun the model without overwriting tf variables tf.set_random_seed(1)", "Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.", "# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE", "line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE", "size = 6, number of test examples = 120) learning_rate -- learning rate", "forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU ->", "CODE HERE ### (approx. 6 lines of code) W1 = tf.get_variable(\"W1\", [25, 12288],", "tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\"", "neural network with tensorflow. 
The shapes are: W1 : [25, 12288] b1 :", "### END CODE HERE ### # Initialize parameters ### START CODE HERE ###", "= 6, number of test examples = 120) learning_rate -- learning rate of", "epoch % 100 == 0: print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost))", "### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### #", "W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\"", "= tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin parameters margin =", "accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_", "### # Initialize parameters ### START CODE HERE ### (1 line) parameters =", "Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ = loss(x1_, x2_, y_) optimizer", "== True and epoch % 100 == 0: print(\"Cost after epoch %i: %f\"", "for epoch in range(num_epochs): epoch_cost = 0. # Defines a cost related to", "propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3", "5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1", "should contain a minibatch for (X,Y). ### START CODE HERE ### (1 line)", "[6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1, W2,", "parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are given in initialize_parameters", "labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes parameters", "as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): #", "Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. 
Use", "[6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE", "lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 =", "of test examples = 120) learning_rate -- learning rate of the optimization num_epochs", "initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost =", "tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1)", "track of the cost # Create Placeholders of shape (n_x, n_y) ### START", "shape (input size = 12288, number of training examples = 1080) Y_train --", "graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ###", "= tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean", "Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training loop with sess.as_default():", "RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape", "will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add')", "= tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3", "A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ###", "only match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels),", "Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) +", "Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. 
Arguments: X_train -- training set, of", "optimization loop minibatch_size -- size of a minibatch print_cost -- True to print", "import tensorflow as tf sess = tf.Session() from keras import backend as K", "loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 #", "W3, b3 \"\"\" tf.set_random_seed(1) # so that your \"random\" numbers match ours ###", "ops import tensorflow as tf sess = tf.Session() from keras import backend as", "0. # Defines a cost related to an epoch num_minibatches = int(m /", "match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters():", "-- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the", "1] W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a", "epoch_cost += minibatch_cost / num_minibatches # Print the cost every epoch if print_cost", "epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size", "loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return", "graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ###", "y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 = np.random.rand(batchsize, dim)", "import get_file # from keras import backend as K from keras.layers import Dense", "<reponame>janisoteps/imsim1 # from keras.models import Model # from keras.layers import Flatten, Dense, Input", "%f\" % (epoch, epoch_cost)) if print_cost == True and epoch % 5 ==", "np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2,", "HERE ### (approx. 
5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)),", "dim) # y = np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y})", "as K K.set_session(sess) # this placeholder will contain our input digits, as flat", "line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define", "to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3,", "# Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you", "the last LINEAR unit \"\"\" # Retrieve the parameters from the dictionary \"parameters\"", "+ b3 ### END CODE HERE ### return Z3 def model(X_train, Y_train, X_test,", "epochs Returns: parameters -- parameters learnt by the model. They can then be", "the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost", "parameters from the dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2 =", "num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in", "Print the cost every epoch if print_cost == True and epoch % 100", "propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE", "of tensors containing W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so", "parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx.", "cost function to tensorflow graph ### START CODE HERE ### (1 line) cost", "Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ = loss(x1_, x2_,", "training examples = 1080) Y_train -- test set, of shape (output size =", "Y_test -- test set, of shape (output size = 6, number of test", "A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) +", "return parameters def forward_propagation(X, parameters): \"\"\" Implements the forward propagation for the model:", "# A2 = relu(Z2) Z3 = 
tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2)", "a cost related to an epoch num_minibatches = int(m / minibatch_size) # number", "model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed =", "examples = 1080) Y_train -- test set, of shape (output size = 6,", "predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set accuracy", "containing W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that your", "dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 =", "parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\"", "W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2", "margin = tf.constant(1.) labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0.,", "START CODE HERE ### (approx. 5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1,", "initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {\"W1\": W1, \"b1\": b1, \"W2\":", "a variable parameters = sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the correct", "True to print the cost every 100 epochs Returns: parameters -- parameters learnt", "K K.set_session(sess) # this placeholder will contain our input digits, as flat vectors", "-- number of epochs of the optimization loop minibatch_size -- size of a", "HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ###", "tf.constant(1.) 
labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)),", "test examples = 120) learning_rate -- learning rate of the optimization num_epochs --", "To keep track of the cost # Create Placeholders of shape (n_x, n_y)", "np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # #", "-> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input", "(1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all", "optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables", "in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train,", "### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END", "+ b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2),", "### # Forward propagation: Build the forward propagation in the tensorflow graph ###", "= int(m / minibatch_size) # number of minibatches of size minibatch_size in the", "loop minibatch_size -- size of a minibatch print_cost -- True to print the", "training loop for epoch in range(num_epochs): epoch_cost = 0. 
# Defines a cost", "cost every epoch if print_cost == True and epoch % 100 == 0:", "number of training examples = 120) Y_test -- test set, of shape (output", "tf.Session() from keras import backend as K K.set_session(sess) # this placeholder will contain", "tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\",", "[6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {\"W1\": W1, \"b1\":", "forward_propagation(X, parameters): \"\"\" Implements the forward propagation for the model: LINEAR -> RELU", "parameters to build a neural network with tensorflow. The shapes are: W1 :", "minibatch_cost / num_minibatches # Print the cost every epoch if print_cost == True", "END CODE HERE ### return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500,", "### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches # Print the", "tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2)", "x2)), reduction_indices=1)) # you can try margin parameters margin = tf.constant(1.) labels =", "tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE", "an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of size", "a minibatch print_cost -- True to print the cost every 100 epochs Returns:", "\"optimizer\" and the \"cost\", the feedict should contain a minibatch for (X,Y). 
###", "to print the cost every 100 epochs Returns: parameters -- parameters learnt by", "START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE", "size of a minibatch print_cost -- True to print the cost every 100", "cost related to an epoch num_minibatches = int(m / minibatch_size) # number of", "6, number of training examples = 1080) X_test -- training set, of shape", "Y_train.shape[0] # n_y : output size costs = [] # To keep track", "test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test", "= [] # To keep track of the cost # Create Placeholders of", "line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function:", "num_minibatches # Print the cost every epoch if print_cost == True and epoch", "num_epochs -- number of epochs of the optimization loop minibatch_size -- size of", "then be used to predict. \"\"\" ops.reset_default_graph() # to be able to rerun", "Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR ->", "tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop", "mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss)", "[6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors", "initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))", "CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow", "Cost function: Add cost function to tensorflow graph ### START CODE HERE ###", "been trained!\") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate", "-> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset 
placeholder, of", "the session to execute the \"optimizer\" and the \"cost\", the feedict should contain", "return loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build a neural network with", "b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3)", "### # Cost function: Add cost function to tensorflow graph ### START CODE", "loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 =", "Start the session to compute the tensorflow graph with tf.Session() as sess: #", "W3, \"b3\": b3} return parameters def forward_propagation(X, parameters): \"\"\" Implements the forward propagation", "LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X", "flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): # Euclidean", "between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin", "(minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on", "Flatten, Dense, Input # from keras.utils.data_utils import get_file # from keras import backend", "-- the output of the last LINEAR unit \"\"\" # Retrieve the parameters", "x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # # l = sess.run(loss_,", "Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer. ### START CODE HERE ###", "the output of the last LINEAR unit \"\"\" # Retrieve the parameters from", "accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X:", "sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost +=", "b2 : [12, 1] W3 : [6, 12] b3 : [6, 1] Returns:", "Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X)", "margin parameters margin = tf.constant(1.) 
labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss", "\"\"\" Initializes parameters to build a neural network with tensorflow. The shapes are:", "x2, y): # Euclidean distance between x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1))", "examples in the train set) n_y = Y_train.shape[0] # n_y : output size", "every epoch if print_cost == True and epoch % 100 == 0: print(\"Cost", "= 120) Y_test -- test set, of shape (output size = 6, number", "Y: minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training", "b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X,", "### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END", ": [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary of", "minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train,", "be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to", "of examples in the train set) n_y = Y_train.shape[0] # n_y : output", "from keras.models import Model # from keras.layers import Flatten, Dense, Input # from", "of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size", "# Forward propagation: Build the forward propagation in the tensorflow graph ### START", "cost every 100 epochs Returns: parameters -- parameters learnt by the model. 
They", "cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost", "tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters = {\"W1\": W1,", "import backend as K K.set_session(sess) # this placeholder will contain our input digits,", "Defines a cost related to an epoch num_minibatches = int(m / minibatch_size) #", "from keras import backend as K from keras.layers import Dense from tensorflow.python.framework import", "b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3", "optimizer. Use an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer =", "Create Placeholders of shape (n_x, n_y) ### START CODE HERE ### (1 line)", "-- training set, of shape (input size = 12288, number of training examples", "\"\"\" # Retrieve the parameters from the dictionary \"parameters\" W1 = parameters['W1'] b1", "initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE ### parameters", "epochs of the optimization loop minibatch_size -- size of a minibatch print_cost --", "range(num_epochs): epoch_cost = 0. 
# Defines a cost related to an epoch num_minibatches", "= sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost", "# Retrieve the parameters from the dictionary \"parameters\" W1 = parameters['W1'] b1 =", "parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\":", "A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2 = np.dot(W2, a1)", "for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch #", "from keras import backend as K K.set_session(sess) # this placeholder will contain our", "K.set_session(sess) # this placeholder will contain our input digits, as flat vectors img_features_x", "b2) # Z2 = np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2", "W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that your \"random\" numbers match", "Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ###", "match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label", "W3 : [6, 12] b3 : [6, 1] Returns: parameters -- a dictionary", "They can then be used to predict. \"\"\" ops.reset_default_graph() # to be able", "LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size = 12288, number", "IMPORTANT: The line that runs the graph on a minibatch. # Run the", "-- test set, of shape (output size = 6, number of training examples", "learnt by the model. They can then be used to predict. 
\"\"\" ops.reset_default_graph()", "= 4 # x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) #", "ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables", "sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op", "# y = np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) #", "K from keras.layers import Dense from tensorflow.python.framework import ops import tensorflow as tf", "sess = tf.Session() from keras import backend as K K.set_session(sess) # this placeholder", "### parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3,", "\"b3\" the shapes are given in initialize_parameters Returns: Z3 -- the output of", "b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2", "LINEAR unit \"\"\" # Retrieve the parameters from the dictionary \"parameters\" W1 =", "parameters margin = tf.constant(1.) labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss =", "### END CODE HERE ### parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2,", "line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build", "of code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1],", "set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\",", "# you can try margin parameters margin = tf.constant(1.) labels = tf.to_float(y) match_loss", "a neural network with tensorflow. 
The shapes are: W1 : [25, 12288] b1", "END CODE HERE ### # Forward propagation: Build the forward propagation in the", "Dense, Input # from keras.utils.data_utils import get_file # from keras import backend as", "tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. Arguments: X_train -- training set, of shape (input size", "import ops import tensorflow as tf sess = tf.Session() from keras import backend", "= 1080) Y_train -- test set, of shape (output size = 6, number", "_, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE", "predict. \"\"\" ops.reset_default_graph() # to be able to rerun the model without overwriting", "an AdamOptimizer. ### START CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ###", "HERE ### return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True):", "model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments:", "[12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6,", "a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3,", "= {X: minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op)", "CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer() # Start", "init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with", "the shapes are given in initialize_parameters Returns: Z3 -- the output of the", "cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op =", "### END CODE HERE ### # Initialize all the variables init = tf.global_variables_initializer()", "b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### 
END CODE HERE ### parameters =", "img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): # Euclidean distance between", "tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if", "shape (input size = 12288, number of training examples = 120) Y_test --", "5 == 0: costs.append(epoch_cost) # lets save the parameters in a variable parameters", "print(\"Parameters have been trained!\") # Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))", "size = 6, number of training examples = 1080) X_test -- training set,", "size = 12288, number of training examples = 120) Y_test -- test set,", "= np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1,", "b3 = parameters['b3'] ### START CODE HERE ### (approx. 5 lines) # Numpy", "12288, number of training examples = 120) Y_test -- test set, of shape", "# IMPORTANT: The line that runs the graph on a minibatch. # Run", "+= minibatch_cost / num_minibatches # Print the cost every epoch if print_cost ==", "= parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ###", "keras import backend as K from keras.layers import Dense from tensorflow.python.framework import ops", "4096)) def loss(x1, x2, y): # Euclidean distance between x1,x2 l2diff = tf.sqrt(", "(1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost", "Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE", "try margin parameters margin = tf.constant(1.) labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term')", "Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the", "print_cost=True): \"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. 
Arguments: X_train -- training", "W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that your \"random\"", "-- test set, of shape (output size = 6, number of test examples", "input size, m : number of examples in the train set) n_y =", "results seed = 3 # to keep consistent results (n_x, m) = X_train.shape", "b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2)", "(output size = 6, number of test examples = 120) learning_rate -- learning", "code) W1 = tf.get_variable(\"W1\", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b1 = tf.get_variable(\"b1\", [25, 1], initializer=tf.zeros_initializer())", "y = np.array([0,1,1,0]) # # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer,", "# from keras.models import Model # from keras.layers import Flatten, Dense, Input #", "initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))", "= tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) # Z2", "0: costs.append(epoch_cost) # lets save the parameters in a variable parameters = sess.run(parameters)", "= Y_train.shape[0] # n_y : output size costs = [] # To keep", "loss_mean def initialize_parameters(): \"\"\" Initializes parameters to build a neural network with tensorflow.", "tf.float32)), b1) # Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) #", "function: Add cost function to tensorflow graph ### START CODE HERE ### (1", "= parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START", "used to predict. 
\"\"\" ops.reset_default_graph() # to be able to rerun the model", "without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed = 3", "print the cost every 100 epochs Returns: parameters -- parameters learnt by the", "= forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost", "your \"random\" numbers match ours ### START CODE HERE ### (approx. 6 lines", "from the dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2']", "# To keep track of the cost # Create Placeholders of shape (n_x,", "sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y})", "12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer()) ### END CODE HERE ###", "every 100 epochs Returns: parameters -- parameters learnt by the model. They can", "# Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters()", "backend as K from keras.layers import Dense from tensorflow.python.framework import ops import tensorflow", "rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results", "minibatch # IMPORTANT: The line that runs the graph on a minibatch. #", "25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12],", "100 == 0: print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost)) if print_cost", "initializer=tf.zeros_initializer()) W2 = tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer())", "num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX. 
Arguments: X_train", "Do the training loop for epoch in range(num_epochs): epoch_cost = 0. # Defines", "tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE", "last LINEAR unit \"\"\" # Retrieve the parameters from the dictionary \"parameters\" W1", "Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1)", "be used to predict. \"\"\" ops.reset_default_graph() # to be able to rerun the", "= minibatch # IMPORTANT: The line that runs the graph on a minibatch.", "variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run training loop with sess.as_default(): optimizer.run(feed_dict={x1_:x1, x2_:x2,", "12288, number of training examples = 1080) Y_train -- test set, of shape", "# Z1 = np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 =", "minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ###", "as sess: # Run the initialization sess.run(init) # Do the training loop for", "= np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3", "A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) #", "Returns: parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3,", "b3) # Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ### return", "Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ = loss(x1_,", "-- parameters learnt by the model. They can then be used to predict.", "y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y: minibatch_Y}) # Initialize all", "graph on a minibatch. 
# Run the session to execute the \"optimizer\" and", "Y_test})) return parameters loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize", "x1,x2 l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin parameters", "Z3 -- the output of the last LINEAR unit \"\"\" # Retrieve the", "12] b3 : [6, 1] Returns: parameters -- a dictionary of tensors containing", "a minibatch for (X,Y). ### START CODE HERE ### (1 line) _, minibatch_cost", "otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean =", "b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE HERE", "the train set) n_y = Y_train.shape[0] # n_y : output size costs =", "(approx. 5 lines) # Numpy Equivalents: Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) #", "costs = [] # To keep track of the cost # Create Placeholders", "= parameters['W3'] b3 = parameters['b3'] ### START CODE HERE ### (approx. 
5 lines)", "[12, 25] b2 : [12, 1] W3 : [6, 12] b3 : [6,", "all the variables init = tf.global_variables_initializer() # Start the session to compute the", "LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size,", "Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches #", "save the parameters in a variable parameters = sess.run(parameters) print(\"Parameters have been trained!\")", "\"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are given in initialize_parameters Returns:", "START CODE HERE ### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE", "b2, W3, b3 \"\"\" tf.set_random_seed(1) # so that your \"random\" numbers match ours", "Z1 = tf.add(tf.matmul(W1, tf.cast(X, tf.float32)), b1) # Z1 = np.dot(W1, X) + b1", "X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test})) return parameters loss_ =", "tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 = np.random.rand(batchsize, dim) # x2 =", "import backend as K from keras.layers import Dense from tensorflow.python.framework import ops import", "a minibatch. 
# Run the session to execute the \"optimizer\" and the \"cost\",", "= tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train})) print(\"Test Accuracy:\", accuracy.eval({X: X_test,", "### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE ###", "X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a three-layer tensorflow neural network:", "[25, 12288] b1 : [25, 1] W2 : [12, 25] b2 : [12,", "# this placeholder will contain our input digits, as flat vectors img_features_x =", "= create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START", "CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line)", "A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1), b2) #", "parameters learnt by the model. They can then be used to predict. \"\"\"", "% 100 == 0: print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost)) if", "# Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3", "seed = 3 # to keep consistent results (n_x, m) = X_train.shape #", "CODE HERE ### parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2,", "parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END", "### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### #", "to compute the tensorflow graph with tf.Session() as sess: # Run the initialization", "digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y):", "l2diff = tf.sqrt( tf.reduce_sum(tf.square(tf.sub(x1, x2)), reduction_indices=1)) # you can try margin parameters margin", "CODE HERE ### return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32,", "train set) n_y = Y_train.shape[0] # n_y : output size costs = []", "HERE ### (1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: 
minibatch_Y})", "containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes are given", "int(m / minibatch_size) # number of minibatches of size minibatch_size in the train", "% 5 == 0: costs.append(epoch_cost) # lets save the parameters in a variable", "LINEAR -> RELU -> LINEAR -> SOFTMAX Arguments: X -- input dataset placeholder,", "number of test examples = 120) learning_rate -- learning rate of the optimization", "optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size", "# Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\",", "of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost", "= np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0]) #", "W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ###", ": [12, 1] W3 : [6, 12] b3 : [6, 1] Returns: parameters", "size, number of examples) parameters -- python dictionary containing your parameters \"W1\", \"b1\",", "np.dot(W1, X) + b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 =", "parameters) ### END CODE HERE ### # Cost function: Add cost function to", "CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE", "Returns: Z3 -- the output of the last LINEAR unit \"\"\" # Retrieve", "# Run the initialization sess.run(init) # Do the training loop for epoch in", "in range(num_epochs): epoch_cost = 0. 
# Defines a cost related to an epoch", "keras.layers import Dense from tensorflow.python.framework import ops import tensorflow as tf sess =", "-> SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number", "of a minibatch print_cost -- True to print the cost every 100 epochs", "CODE HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE", "Run the session to execute the \"optimizer\" and the \"cost\", the feedict should", "np.dot(W2, a1) + b2 A2 = tf.nn.relu(Z2) # A2 = relu(Z2) Z3 =", "graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the", "# Calculate the correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on", "examples) parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\",", "train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)", "correct predictions correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y)) # Calculate accuracy on the test set", "b3 \"\"\" tf.set_random_seed(1) # so that your \"random\" numbers match ours ### START", "return Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements", "n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE", "SOFTMAX Arguments: X -- input dataset placeholder, of shape (input size, number of", "# Start the session to compute the tensorflow graph with tf.Session() as sess:", "to an epoch num_minibatches = int(m / minibatch_size) # number of minibatches of", ": [6, 1] Returns: parameters -- a dictionary of tensors containing W1, b1,", "# # l = sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict =", "-- True to print the cost every 100 epochs Returns: parameters -- parameters", "tf.set_random_seed(1) # so that your \"random\" numbers match ours ### START CODE HERE", "tensorflow 
graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do", "this placeholder will contain our input digits, as flat vectors img_features_x = tf.placeholder(tf.float32,", "in the tensorflow graph ### START CODE HERE ### (1 line) Z3 =", "match ours ### START CODE HERE ### (approx. 6 lines of code) W1", "the model. They can then be used to predict. \"\"\" ops.reset_default_graph() # to", "= tf.constant(1.) labels = tf.to_float(y) match_loss = tf.square(l2diff, 'match_term') mismatch_loss = tf.maximum(0., tf.sub(margin,", "# from keras.utils.data_utils import get_file # from keras import backend as K from", ": [25, 12288] b1 : [25, 1] W2 : [12, 25] b2 :", "sess: # Run the initialization sess.run(init) # Do the training loop for epoch", "of the last LINEAR unit \"\"\" # Retrieve the parameters from the dictionary", "feed_dict={X: minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost /", "if print_cost == True and epoch % 5 == 0: costs.append(epoch_cost) # lets", "x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) # y = np.array([0,1,1,0])", "# x1 = np.random.rand(batchsize, dim) # x2 = np.random.rand(batchsize, dim) # y =", "to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) #", "= relu(Z2) Z3 = tf.add(tf.matmul(W3, A2), b3) # Z3 = np.dot(W3,Z2) + b3", "END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1", "by the model. They can then be used to predict. 
\"\"\" ops.reset_default_graph() #", "python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\" the shapes", "= 6, number of training examples = 1080) X_test -- training set, of", "sess.run(init) # Do the training loop for epoch in range(num_epochs): epoch_cost = 0.", "1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select", "= tf.placeholder(tf.float32, shape=(1, 4096)) def loss(x1, x2, y): # Euclidean distance between x1,x2", "of shape (input size, number of examples) parameters -- python dictionary containing your", "x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 = np.random.rand(batchsize,", "START CODE HERE ### (1 line) _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X,", "label is 1, only match_loss will count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss),", "accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\")) print(\"Train Accuracy:\", accuracy.eval({X: X_train,", "able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep", "HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### #", "epoch_cost = 0. 
# Defines a cost related to an epoch num_minibatches =", "= sess.run(loss_, feed_dict={x1_:x1, x2_:x2, y_:y}) # sess.run((optimizer, cost), feed_dict = {X: minibatch_X, Y:", "initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer())", "Z3 = np.dot(W3,Z2) + b3 ### END CODE HERE ### return Z3 def", "learning_rate -- learning rate of the optimization num_epochs -- number of epochs of", "create_placeholders(n_x, n_y) ### END CODE HERE ### # Initialize parameters ### START CODE", "+ b1 A1 = tf.nn.relu(Z1) # A1 = relu(Z1) Z2 = tf.add(tf.matmul(W2, A1),", "\"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return parameters def forward_propagation(X, parameters):", "minibatch_X, Y: minibatch_Y}) ### END CODE HERE ### epoch_cost += minibatch_cost / num_minibatches", "### END CODE HERE ### # Forward propagation: Build the forward propagation in", "HERE ### (1 line) X, Y = create_placeholders(n_x, n_y) ### END CODE HERE", "True and epoch % 5 == 0: costs.append(epoch_cost) # lets save the parameters", "placeholder will contain our input digits, as flat vectors img_features_x = tf.placeholder(tf.float32, shape=(1,", "the optimization num_epochs -- number of epochs of the optimization loop minibatch_size --", "parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3'] b3 = parameters['b3'] ### START CODE", "minibatch_X, Y: minibatch_Y}) # Initialize all variables init_op = tf.global_variables_initializer() sess.run(init_op) # Run", "= tf.get_variable(\"W2\", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b2 = tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 =", "a dictionary of tensors containing W1, b1, W2, b2, W3, b3 \"\"\" tf.set_random_seed(1)", "= initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward", "END CODE HERE ### parameters = {\"W1\": W1, \"b1\": 
b1, \"W2\": W2, \"b2\":", "tf.set_random_seed(1) # to keep consistent results seed = 3 # to keep consistent", "\"W3\", \"b3\" the shapes are given in initialize_parameters Returns: Z3 -- the output", "[] # To keep track of the cost # Create Placeholders of shape", "tensorflow.python.framework import ops import tensorflow as tf sess = tf.Session() from keras import", "= tf.get_variable(\"b2\", [12, 1], initializer=tf.zeros_initializer()) W3 = tf.get_variable(\"W3\", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1)) b3 =", "START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE", "return parameters loss_ = loss(x1_, x2_, y_) optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize =", "count, otherwise mismatch_loss loss = tf.add(tf.mul(labels, match_loss), tf.mul((1 - labels), mismatch_loss), 'loss_add') loss_mean", "{\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3} return", "= parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 =", "Z3 def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001, num_epochs=1500, minibatch_size=32, print_cost=True): \"\"\" Implements a", "### (1 line) optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) ### END CODE HERE ### # Initialize", "# to keep consistent results seed = 3 # to keep consistent results", "parameters = sess.run(parameters) print(\"Parameters have been trained!\") # Calculate the correct predictions correct_prediction", "the model without overwriting tf variables tf.set_random_seed(1) # to keep consistent results seed", "# if label is 1, only match_loss will count, otherwise mismatch_loss loss =", "import Model # from keras.layers import Flatten, Dense, Input # from keras.utils.data_utils import", "parameters['W1'] b1 = parameters['b1'] W2 = parameters['W2'] b2 = parameters['b2'] W3 = parameters['W3']", "minibatch. 
# Run the session to execute the \"optimizer\" and the \"cost\", the", "optimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss_) # batchsize = 4 # x1 = np.random.rand(batchsize, dim) #", "in the train set) n_y = Y_train.shape[0] # n_y : output size costs", "parameters): \"\"\" Implements the forward propagation for the model: LINEAR -> RELU ->", "m) = X_train.shape # (n_x: input size, m : number of examples in", "initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation", "mismatch_loss = tf.maximum(0., tf.sub(margin, tf.square(l2diff)), 'mismatch_term') # if label is 1, only match_loss", "Retrieve the parameters from the dictionary \"parameters\" W1 = parameters['W1'] b1 = parameters['b1']", "tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y)", "= parameters['b3'] ### START CODE HERE ### (approx. 5 lines) # Numpy Equivalents:", "The line that runs the graph on a minibatch. # Run the session", "[25, 1] W2 : [12, 25] b2 : [12, 1] W3 : [6,", "1] W2 : [12, 25] b2 : [12, 1] W3 : [6, 12]", "X_train -- training set, of shape (input size = 12288, number of training", "END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.", "- labels), mismatch_loss), 'loss_add') loss_mean = tf.reduce_mean(loss) return loss_mean def initialize_parameters(): \"\"\" Initializes" ]
[ "get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() ) return working_set[package_name]", "'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file =", "isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext =", "get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return", "def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\" version [\"-py\" pyver [\"-\"", "(\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding URL \"\"\", re.VERBOSE) def", "chain(x.items(), y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name)", "'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution):", "k, v in metadata.items() if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for", "ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking filename components at", "Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? 
# Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata):", "'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None metadata_file = None filename =", "return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError:", "re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist", "component[1] # Stop taking filename components at the first missing/invalid component filename_component =", "if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext", "KeyError: # Package not available locally, # or there isn't a 'Funding' entry", "filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif", "raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata)", "get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound()", "ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None metadata_file =", "in metadata if m] metadata = reduce( lambda x, y: dict((k, v) for", "# Dist filename syntax # name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\"", "m in metadata if m] metadata = reduce( lambda x, y: dict((k, v)", "return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata =", "ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' 
metadata_file", "in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as", "try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file", "= re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding URL \"\"\",", "Package not available locally, # or there isn't a 'Funding' entry in the", "for k, v in chain(x.items(), y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name):", "filename, metadata_file) if ext: return path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)?", "return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError:", "'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA'", "metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif", "dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally", "y: dict((k, v) for k, v in chain(x.items(), y.items())), metadata, {}, ) return", "valid_component(component): return component[1] # Stop taking filename components at the first missing/invalid component", "def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No", "( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', 
dist.platform), )) filename = ''.join(chain(*filename_component))", "= ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata = [m for m", ") metadata = [m for m in metadata if m] metadata = reduce(", "from functools import reduce from itertools import chain, takewhile import os import pkg_resources", "working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read())", "ext) path = os.path.join(dist.location, filename, metadata_file) if ext: return path else: return None", "<filename>thanks/package_tools.py from functools import reduce from itertools import chain, takewhile import os import", "\".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking filename components", "= 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else:", "= 'PKG-INFO' else: ext = None metadata_file = None filename = '{}.{}'.format(filename, ext)", "('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file", "filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if ext: return path", "x, y: dict((k, v) for k, v in chain(x.items(), y.items())), metadata, {}, )", "parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata = [m", "re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? 
# Funding URL \"\"\", re.VERBOSE)", "[\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking", "= ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist,", "filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), ))", "= 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file", "except KeyError: # Package not available locally, # or there isn't a 'Funding'", "= get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally raise", "dist) for dist in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist)", "k, v in chain(x.items(), y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name): try:", "locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url =", "get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json", "return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for k, v in metadata.items() if", "metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for k, v in metadata.items() if v)", "filter_empty_metadata(metadata): return dict((k, v) for k, v in metadata.items() if v) def 
parse_metadata(metadata):", "get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for k, v in metadata.items()", "def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return", "if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() )", "takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename =", "get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]]", "get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\" version", "locally, # or there isn't a 'Funding' entry in the project_urls raise MetaDataNotFound()", "v) for k, v in metadata.items() if v) def parse_metadata(metadata): metadata = (", "path = os.path.join(dist.location, filename, metadata_file) if ext: return path else: return None metadata_patterns", "# Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? 
# Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def", "ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO')", "dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info'", "# Stop taking filename components at the first missing/invalid component filename_component = takewhile(valid_component,", ") return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata", "pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info'", "# https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking filename components at the", "metadata.json file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name)", "None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if ext: return", "URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for", "class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist in", "v in chain(x.items(), y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name): try: dist", "syntax # name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata", "\"\"\", re.VERBOSE) def get_line_metadata(line): return 
metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for k,", "for k, v in metadata.items() if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line))", "get_funding_data(metadata) except KeyError: # Package not available locally, # or there isn't a", "or there isn't a 'Funding' entry in the project_urls raise MetaDataNotFound() return funding_url", "except FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name):", "fh: metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): #", "else: ext = None metadata_file = None filename = '{}.{}'.format(filename, ext) path =", "def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata =", "Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v)", "# Package not available locally, # or there isn't a 'Funding' entry in", "available locally, # or there isn't a 'Funding' entry in the project_urls raise", "y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata", "elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext =", "= os.path.join(dist.location, filename, metadata_file) if ext: return path else: return None metadata_patterns =", "metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata = [m for", "= reduce( lambda x, y: dict((k, v) for k, v in chain(x.items(), y.items())),", "def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def 
filter_empty_metadata(metadata): return dict((k, v) for k, v in", "for m in metadata if m] metadata = reduce( lambda x, y: dict((k,", "filename syntax # name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext #", "('-py', dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext =", "path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer", "None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding", ") return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist) except", "= None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if ext:", "{}, ) return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata = get_dist_metadata(dist)", "pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh:", "'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext", "return dict((k, v) for k, v in metadata.items() if v) def parse_metadata(metadata): metadata", "required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking filename", "components at the first missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-',", "import reduce from 
itertools import chain, takewhile import os import pkg_resources import re", "elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext", "metadata_file = 'PKG-INFO' else: ext = None metadata_file = None filename = '{}.{}'.format(filename,", "return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax #", "[\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] #", "Dist filename syntax # name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext", "pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist)", "in chain(x.items(), y.items())), metadata, {}, ) return metadata def get_local_metadata(package_name): try: dist =", "v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata", "= get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata):", "= 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist,", "metadata.items() if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines()", "itertools import chain, takewhile import os import pkg_resources import re class MetaDataNotFound(Exception): pass", "(\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? 
# Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict()", "def valid_component(component): return component[1] # Stop taking filename components at the first missing/invalid", "file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url", "for dist in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with", "('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution):", "component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform),", "taking filename components at the first missing/invalid component filename_component = takewhile(valid_component, ( ('',", "for line in metadata.splitlines() ) metadata = [m for m in metadata if", "metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata def", "= dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist):", "return path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? #", "metadata = [m for m in metadata if m] metadata = reduce( lambda", "with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url')", "# Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? 
# Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line):", "get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package not", "v) for k, v in chain(x.items(), y.items())), metadata, {}, ) return metadata def", "return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? #", "# No metadata.json file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata", "dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file =", "pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg',", "pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() )", "= parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename", "lambda x, y: dict((k, v) for k, v in chain(x.items(), y.items())), metadata, {},", "metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist", "parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax", "v in metadata.items() if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line", "metadata def get_local_metadata(package_name): try: dist = 
get_local_dist(package_name) metadata = get_dist_metadata(dist) except FileNotFoundError: #", "metadata = get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound() return", "= takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename", "metadata.splitlines() ) metadata = [m for m in metadata if m] metadata =", "takewhile import os import pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set", "not available locally, # or there isn't a 'Funding' entry in the project_urls", "m] metadata = reduce( lambda x, y: dict((k, v) for k, v in", "name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component):", "if ext: return path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? 
# Author", "get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound() return metadata def", "= None metadata_file = None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename,", "= '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if ext: return path else:", "pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None metadata_file", "Stop taking filename components at the first missing/invalid component filename_component = takewhile(valid_component, (", "dist in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path)", "os import pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict(", "= [m for m in metadata if m] metadata = reduce( lambda x,", "(\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? 
# Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return", "ext = None metadata_file = None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location,", "in metadata.items() if v) def parse_metadata(metadata): metadata = ( filter_empty_metadata(get_line_metadata(line)) for line in", "pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop", "'{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if ext: return path else: return", "return component[1] # Stop taking filename components at the first missing/invalid component filename_component", "in metadata.splitlines() ) metadata = [m for m in metadata if m] metadata", "working_set = dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() ) return working_set[package_name] def", "os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None metadata_file = None filename", "line in metadata.splitlines() ) metadata = [m for m in metadata if m]", "metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name", "the first missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py',", "re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k, v) for k, v", "metadata = reduce( lambda x, y: dict((k, v) for k, v in chain(x.items(),", "isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, 
pkg_resources.DistInfoDistribution): ext =", "= get_dist_metadata(dist) except FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound() return metadata", "missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-',", "try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package not available", "chain, takewhile import os import pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name):", "if m] metadata = reduce( lambda x, y: dict((k, v) for k, v", "pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist,", "MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except", "reduce( lambda x, y: dict((k, v) for k, v in chain(x.items(), y.items())), metadata,", "def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package", "# or there isn't a 'Funding' entry in the project_urls raise MetaDataNotFound() return", "metadata if m] metadata = reduce( lambda x, y: dict((k, v) for k,", "metadata_file = 'METADATA' elif isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO'", "metadata_file) if ext: return path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? 
#", "def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\"", "dict((k, v) for k, v in chain(x.items(), y.items())), metadata, {}, ) return metadata", "metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\" version [\"-py\" pyver", "[m for m in metadata if m] metadata = reduce( lambda x, y:", "import os import pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set =", "MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet()", "metadata def get_local_funding_metadata(package_name): try: metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: #", "= 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution): ext = 'dist-info' metadata_file =", "os.path.join(dist.location, filename, metadata_file) if ext: return path else: return None metadata_patterns = re.compile(r\"\"\"", "[\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return", "( filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata = [m for m in", "functools import reduce from itertools import chain, takewhile import os import pkg_resources import", "filter_empty_metadata(get_line_metadata(line)) for line in metadata.splitlines() ) metadata = [m for m in metadata", ")) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO'", "import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for", "import chain, 
takewhile import os import pkg_resources import re class MetaDataNotFound(Exception): pass def", "version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1]", "metadata, {}, ) return metadata def get_local_metadata(package_name): try: dist = get_local_dist(package_name) metadata =", "get_dist_metadata(dist): metadata_path = get_local_dist_metadata_filepath(dist) with open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata", "def get_local_dist(package_name): working_set = dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() ) return", "dict( (dist.project_name, dist) for dist in pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path", "# Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return metadata_patterns.search(line).groupdict() def filter_empty_metadata(metadata): return dict((k,", "FileNotFoundError: # No metadata.json file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try:", "def filter_empty_metadata(metadata): return dict((k, v) for k, v in metadata.items() if v) def", "= get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package not available locally, #", "from itertools import chain, takewhile import os import pkg_resources import re class MetaDataNotFound(Exception):", "filename components at the first missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))),", "No metadata.json file locally raise MetaDataNotFound() return metadata def get_local_funding_metadata(package_name): try: metadata =", "= get_funding_data(metadata) except KeyError: # Package not available locally, # or there isn't", "as fh: metadata = 
parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist):", "return metadata.get('funding_url') def get_local_dist_metadata_filepath(dist): # Dist filename syntax # name [\"-\" version [\"-py\"", "# name [\"-\" version [\"-py\" pyver [\"-\" required_platform]]] \".\" ext # https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def", "at the first missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))),", "first missing/invalid component filename_component = takewhile(valid_component, ( ('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version),", "= os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None metadata_file = None", "'PKG-INFO' else: ext = None metadata_file = None filename = '{}.{}'.format(filename, ext) path", "metadata_file = None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file) if", "ext: return path else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? 
# Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)?", "get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package not available locally, # or", "open(metadata_path) as fh: metadata = parse_metadata(fh.read()) return metadata def get_funding_data(metadata): return metadata.get('funding_url') def", "isinstance(dist, pkg_resources.Distribution): ext = os.path.join('egg', 'EGG-INFO') metadata_file = 'PKG-INFO' else: ext = None", "dict((k, v) for k, v in metadata.items() if v) def parse_metadata(metadata): metadata =", "metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding URL", "metadata = get_local_metadata(package_name) funding_url = get_funding_data(metadata) except KeyError: # Package not available locally,", "pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext", "funding_url = get_funding_data(metadata) except KeyError: # Package not available locally, # or there", "reduce from itertools import chain, takewhile import os import pkg_resources import re class", "https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata def valid_component(component): return component[1] # Stop taking filename components at the first", "('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))), ('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))), ('-py', dist.py_version), ('-', dist.platform), )) filename = ''.join(chain(*filename_component)) if", "import pkg_resources import re class MetaDataNotFound(Exception): pass def get_local_dist(package_name): working_set = dict( (dist.project_name,", "(dist.project_name, dist) for dist in 
pkg_resources.WorkingSet() ) return working_set[package_name] def get_dist_metadata(dist): metadata_path =", "''.join(chain(*filename_component)) if isinstance(dist, pkg_resources.EggInfoDistribution): ext = 'egg-info' metadata_file = 'PKG-INFO' elif isinstance(dist, pkg_resources.DistInfoDistribution):", "else: return None metadata_patterns = re.compile(r\"\"\" (\\s*Author:\\s+(?P<author>.*)\\s*)? # Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)?", "None metadata_file = None filename = '{}.{}'.format(filename, ext) path = os.path.join(dist.location, filename, metadata_file)", "Author (\\s*Maintainer:\\s+(?P<maintainer>.+)\\s*)? # Maintainer (\\s*Project-URL:\\sFunding,\\s+(?P<funding_url>.+)\\s*)? # Funding URL \"\"\", re.VERBOSE) def get_line_metadata(line): return" ]
[ "def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args: API (client.<type>) -", "equivalent to Annotations in Kubernetes API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase):", "deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title =", "models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from cluster.\") class Meta: abstract =", "cluster.\") class Meta: abstract = True def slugify_function(self): \"\"\" :description: Overrides default slugify", "id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster =", "port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace", "is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed", "Overrides default slugify with custom logic. 
\"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property", "default=\"v1\", help_text=\"API version used to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation", "object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port = models.IntegerField(default=80,", "uuid import uuid4 from django.contrib.postgres.fields import JSONField from django.db import models from kubernetes", "- Kubernetes Client Type Returns: object of type <API> \"\"\" if \"persist_config\" not", ":inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False,", "expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\") kuid", "KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base parent model that all subsequent", "config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title", "(abstract) :description: Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations", "= False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if", "def slugify_function(self): \"\"\" :description: Overrides default slugify with custom logic. \"\"\" return self.title.replace(\"_\",", ":description: Overrides default slugify with custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower()", "blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\") class Meta: abstract =", "Extends KubernetesMetadataObjBase to include network fields. 
:inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version", "import json from tempfile import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import", "of type <API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with", "help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is", "models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\") class Meta: abstract = True", "open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model", "version used to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes", "= JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes API\") annotations = JSONField(default=dict,", "from django.db import models from kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase", "api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child object.\") kind =", "to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict,", "Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to", "labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child", "= models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True,", "logic. 
\"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function() def", "False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config,", "= models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the", "object of type <API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False", "self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args: API (client.<type>)", "KubernetesBase :type: model (abstract) :description: Base parent model that all subsequent models inherit", "will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\")", "include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary", "help_text=\"Time when object is removed from cluster.\") class Meta: abstract = True def", "primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey", "json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return", "blank=True, help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object", "Annotations in Kubernetes API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase", "(abstract) :description: Extends KubernetesMetadataObjBase to include network fields. 
:inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations", ":inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used", "f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends", "\"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child object.\") kind", "the object is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in", "client Args: API (client.<type>) - Kubernetes Client Type Returns: object of type <API>", "django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True,", "kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc =", "KubernetesBase to include metadata fields. 
:inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels =", "\"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes API\") annotations", "title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config", "abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase", "labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes", "django.db import models from kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type:", "help_text=\"Live namespace the object is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's", "= models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to cluster.\") removed = models.DateTimeField(null=True,", "kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\") class Meta: abstract", "from uuid import uuid4 from django.contrib.postgres.fields import JSONField from django.db import models from", "to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\")", "models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass", "is removed from cluster.\") class Meta: abstract = True def slugify_function(self): \"\"\" :description:", "tempfile import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import JSONField from django.db", "\"-\").lower() @property def 
slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s", "in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc", "if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name))", "representation of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace", "parent model that all subsequent models inherit from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster,", "all subsequent models inherit from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted", "**kwargs): \"\"\"Gets a k8s api client Args: API (client.<type>) - Kubernetes Client Type", "= models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\") class Meta: abstract =", "when object is removed from cluster.\") class Meta: abstract = True def slugify_function(self):", "default slugify with custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def", "models inherit from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\" id", "models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict,", "kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name,", "(abstract) :description: Base parent model that all subsequent models inherit from. 
:inherits: django_extensions.db.models.TitleSlugDescriptionModel", "= models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\") kuid = models.CharField(max_length=48,", "in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations", "models from kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract)", "annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child object.\")", "object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed =", "slugify with custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self):", "annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes API\")", "removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from cluster.\") class Meta:", "model (abstract) :description: Base parent model that all subsequent models inherit from. 
:inherits:", "Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends", "help_text=\"String representation of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\")", ":fields: id, cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID", "\"\"\"Gets a k8s api client Args: API (client.<type>) - Kubernetes Client Type Returns:", "= json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc)", ":description: Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\"", "\"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function() def get_client(self,", "in Kubernetes API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type:", "annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\")", "from django.contrib.postgres.fields import JSONField from django.db import models from kubernetes import client, config", "\"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster", "null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\") class Meta: abstract", "class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to include metadata", "\", \"-\").lower() @property def slug(self): return self.slugify_function() def get_client(self, 
API=client.CoreV1Api, **kwargs): \"\"\"Gets a", ":fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in", ":type: model (abstract) :description: Extends KubernetesMetadataObjBase to include network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields:", "to include network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16,", "self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase", "id, cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto", "<API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as", "that all subsequent models inherit from. 
:inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed,", "Type Returns: object of type <API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"]", "object kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE,", "namespace the object is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID", "is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\")", "class Meta: abstract = True def slugify_function(self): \"\"\" :description: Overrides default slugify with", "null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra", "import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base parent", "from cluster.\") class Meta: abstract = True def slugify_function(self): \"\"\" :description: Overrides default", "\"\"\" :description: Overrides default slugify with custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\" \",", "inherit from. 
:inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\" id =", "= models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\",", "api client Args: API (client.<type>) - Kubernetes Client Type Returns: object of type", "KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to include network fields.", "get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args: API (client.<type>) - Kubernetes", "models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object will", "kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels", ":description: Base parent model that all subsequent models inherit from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields:", "else self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\"", "= models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object", "model that all subsequent models inherit from. 
:inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config,", "API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract)", "to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from cluster.\")", "as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config", "blank=True, help_text=\"Time when object is removed from cluster.\") class Meta: abstract = True", "network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API", "dict) else self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase):", "with custom logic. 
\"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return", "config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True,", "child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port =", "namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\") kuid =", "k8s api client Args: API (client.<type>) - Kubernetes Client Type Returns: object of", "when object is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object", "\"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] =", "models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL,", "deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port", "import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import JSONField from django.db import", "class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description:", "Meta: abstract = True def slugify_function(self): \"\"\" :description: Overrides default slugify with custom", "Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in", "= True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to", "Args: API (client.<type>) - Kubernetes Client 
Type Returns: object of type <API> \"\"\"", "cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True,", "to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\")", "API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to include", "models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\") kuid = models.CharField(max_length=48, null=True,", "Kubernetes Client Type Returns: object of type <API> \"\"\" if \"persist_config\" not in", "f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase", "help_text=\"Time when object is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when", "django.contrib.postgres.fields import JSONField from django.db import models from kubernetes import client, config class", "cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\") as f:", "removed from cluster.\") class Meta: abstract = True def slugify_function(self): \"\"\" :description: Overrides", "with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict)", "\"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to include network fields. 
:inherits:", "KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to include network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase", "True def slugify_function(self): \"\"\" :description: Overrides default slugify with custom logic. \"\"\" return", "kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live", "ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with", "extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to", "cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from cluster.\") class", "associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\") class", "return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api,", "json from tempfile import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import JSONField", "NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else", "from tempfile import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import JSONField from", "JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time", "include network fields. 
:inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\",", "class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base parent model that all", "slugify_function(self): \"\"\" :description: Overrides default slugify with custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\"", "= ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\")", "editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True,", "JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True,", "fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version", "kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to", "used to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object", "subsequent models inherit from. 
:inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\"", "import JSONField from django.db import models from kubernetes import client, config class KubernetesBase(models.Model):", "= models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child object.\") kind = models.CharField(max_length=16,", "help_text=\"Dictionary store equivalent to Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True,", "to Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent", "not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name", "client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base parent model", "Returns: object of type <API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] =", "in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied", "models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True,", "object is removed from cluster.\") class Meta: abstract = True def slugify_function(self): \"\"\"", "as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description:", "Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\",", "class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to include network", ":type: model (abstract) 
:description: Base parent model that all subsequent models inherit from.", "null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when", "metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store", "deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to cluster.\") removed =", "\"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf:", "slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args:", "@property def slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api", "= JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\") class", "model (abstract) :description: Extends KubernetesMetadataObjBase to include network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels,", "object is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the", ":description: Extends KubernetesMetadataObjBase to include network fields. :inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\"", "= JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed = models.DateTimeField(null=True, blank=True,", "kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base", "= models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from cluster.\") class Meta: abstract", "fields. 
:inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent", "models.IntegerField(default=80, help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object", "self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs):", "API (client.<type>) - Kubernetes Client Type Returns: object of type <API> \"\"\" if", "KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase", "KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to include metadata fields.", "models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String", "\"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to include metadata fields. 
:inherits:", "API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes", "cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\")", "applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is removed from", "with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type:", ":inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to", "to Annotations in Kubernetes API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\"", ":fields: labels, annotations \"\"\" api_version = models.CharField(max_length=16, default=\"v1\", help_text=\"API version used to deploy", "on_delete=models.CASCADE, help_text=\"Live namespace the object is associated with.\") kuid = models.CharField(max_length=48, null=True, blank=True,", "return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args: API", "custom logic. \"\"\" return self.title.replace(\"_\", \"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function()", "ntf.name cc = json.dumps(self.cluster.config) if isinstance(self.cluster.config, dict) else self.cluster.config with open(ntf.name, \"w\") as", "True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model (abstract) :description: Extends KubernetesMetadataObjBase to include", "KubernetesMetadataObjBase to include network fields. 
:inherits: kubernetes_manager.models.base.KubernetesMetadataObjBase :fields: labels, annotations \"\"\" api_version =", "\"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract)", "Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels, annotations \"\"\" labels", "deleted \"\"\" id = models.UUIDField(default=uuid4, editable=False, primary_key=True, help_text=\"UUID Auto field.\") title = models.CharField(max_length=128)", "model (abstract) :description: Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields: labels,", "equivalent to Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store", "parameters here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to cluster.\")", "= True def slugify_function(self): \"\"\" :description: Overrides default slugify with custom logic. 
\"\"\"", "if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"]", "JSONField from django.db import models from kubernetes import client, config class KubernetesBase(models.Model): \"\"\"", "kwargs[\"persist_config\"] = False with NamedTemporaryFile() as ntf: kwargs[\"config_file\"] = ntf.name cc = json.dumps(self.cluster.config)", "object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is associated", "uuid4 from django.contrib.postgres.fields import JSONField from django.db import models from kubernetes import client,", "help_text=\"UUID Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to", "a k8s api client Args: API (client.<type>) - Kubernetes Client Type Returns: object", "\"\"\" KubernetesBase :type: model (abstract) :description: Base parent model that all subsequent models", "of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port object will expose\") namespace =", "help_text=\"API version used to deploy child object.\") kind = models.CharField(max_length=16, help_text=\"String representation of", "blank=True, help_text=\"Time when object is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time", "help_text=\"Port object will expose\") namespace = models.ForeignKey(\"KubernetesNamespace\", on_delete=models.CASCADE, help_text=\"Live namespace the object is", "store equivalent to Labels in Kubernetes API\") annotations = JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary", "kind = models.CharField(max_length=16, help_text=\"String representation of Kubernetes object kind\") port = models.IntegerField(default=80, help_text=\"Port", "def slug(self): return self.slugify_function() def get_client(self, 
API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client", "API=client.CoreV1Api, **kwargs): \"\"\"Gets a k8s api client Args: API (client.<type>) - Kubernetes Client", "help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\") class Meta: abstract = True", "TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters here.\") deployed", "import uuid4 from django.contrib.postgres.fields import JSONField from django.db import models from kubernetes import", "abstract = True def slugify_function(self): \"\"\" :description: Overrides default slugify with custom logic.", "\"-\").replace(\" \", \"-\").lower() @property def slug(self): return self.slugify_function() def get_client(self, API=client.CoreV1Api, **kwargs): \"\"\"Gets", "return API(api_client=config.new_client_from_config(config_file=ntf.name)) class KubernetesMetadataObjBase(KubernetesBase): \"\"\" KubernetesMetadataObjBase :type: model (abstract) :description: Extends KubernetesBase to", "JSONField(default=dict, null=True, blank=True, help_text=\"Dictionary store equivalent to Annotations in Kubernetes API\") class Meta:", "labels = JSONField(default=dict, help_text=\"Dictionary store equivalent to Labels in Kubernetes API\") annotations =", "Kubernetes API\") class Meta: abstract = True class KubernetesNetworkingBase(KubernetesMetadataObjBase): \"\"\" KubernetesNetworkingBase :type: model", "from kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description:", "on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in", "config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model (abstract) :description: Base parent model that", "= models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, 
null=True, help_text=\"ForeignKey to TargetCluster object.\") config =", "NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields import JSONField from django.db import models", "help_text=\"ForeignKey to TargetCluster object.\") config = JSONField(default=dict, null=True, blank=True, help_text=\"Pass in extra parameters", "with.\") kuid = models.CharField(max_length=48, null=True, blank=True, help_text=\"Object's UID in the cluster\") class Meta:", "(client.<type>) - Kubernetes Client Type Returns: object of type <API> \"\"\" if \"persist_config\"", "type <API> \"\"\" if \"persist_config\" not in kwargs: kwargs[\"persist_config\"] = False with NamedTemporaryFile()", "from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id, cluster, config, deployed, deleted \"\"\" id = models.UUIDField(default=uuid4,", "Auto field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster", "store equivalent to Annotations in Kubernetes API\") class Meta: abstract = True class", "here.\") deployed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is applied to cluster.\") removed", "field.\") title = models.CharField(max_length=128) cluster = models.ForeignKey(\"TargetCluster\", on_delete=models.SET_NULL, null=True, help_text=\"ForeignKey to TargetCluster object.\")", "import models from kubernetes import client, config class KubernetesBase(models.Model): \"\"\" KubernetesBase :type: model", "Client Type Returns: object of type <API> \"\"\" if \"persist_config\" not in kwargs:", "object is applied to cluster.\") removed = models.DateTimeField(null=True, blank=True, help_text=\"Time when object is", "<reponame>breimers/Django-Kubernetes-Manager import json from tempfile import NamedTemporaryFile from uuid import uuid4 from django.contrib.postgres.fields", "isinstance(self.cluster.config, dict) else 
self.cluster.config with open(ntf.name, \"w\") as f: f.write(cc) return API(api_client=config.new_client_from_config(config_file=ntf.name)) class", ":type: model (abstract) :description: Extends KubernetesBase to include metadata fields. :inherits: kubernetes_manager.models.base.KubernetesBase :fields:", "Base parent model that all subsequent models inherit from. :inherits: django_extensions.db.models.TitleSlugDescriptionModel :fields: id," ]
[ "int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames =", "that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store results.',", "help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path: str,", "= i * batch_size end = min((i + 1) * batch_size, num_frames) curr_frames", ":param path: path where to store resulting video. :param frames: frames sequence. \"\"\"", "generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file by frames sequence. :param path:", "for i in range(num_batches): begin = i * batch_size end = min((i +", "to video. :return: loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for", "axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video that need to process.',", "str, frames: List[np.ndarray]): \"\"\" Generate video file by frames sequence. :param path: path", "results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640')", "= num_frames // batch_size num_batches += int(num_batches % batch_size != 0) styled_frames =", "= [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames ] counter = tqdm(original_frames,", "where to store resulting video. :param frames: frames sequence. \"\"\" (height, width, _)", "to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to", "video_path: path to video. :return: loaded frames. 
\"\"\" video_reader = FFMPEG_VideoReader(video_path) frames =", "-> List[np.ndarray]: \"\"\" Combine two sequences of frames into one by concatenating them.", "import numpy as np from typing import List import click def get_frames(video_path: str)", "styled_frames = [] for i in range(num_batches): begin = i * batch_size end", "get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames ] counter", "default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to", "= get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames ]", "by frames sequence. :param path: path where to store resulting video. :param frames:", "if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must be same length!') combined_frames", "to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb')", "to store resulting video. :param frames: frames sequence. 
\"\"\" (height, width, _) =", "= [] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def", "styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images) if", "= np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end", "= frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height)) for image in tqdm(frames,", "for left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return", "image_size: str, batch_size: str): image_size = [int(size) for size in image_size.split(',')] batch_size =", "in original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches =", "tqdm import cv2 from utils.demo_utils import StyleTransferDemo import numpy as np from typing", "= cv2.VideoWriter(path, -1, 30, (width, height)) for image in tqdm(frames, desc='Writing video'): video.write(image)", "begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images) if __name__ == '__main__':", "curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames)", "combined_frames @click.command() @click.option('--video_path', help='Path to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path", "range(num_batches): begin = i * batch_size end = min((i + 1) * batch_size,", "left side sequence. 
:param right_frames: right side sequence. :return: concatenated sequence. \"\"\" if", "- begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images) if __name__ ==", "\"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must be same length!')", "image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) ->", "num_frames // batch_size num_batches += int(num_batches % batch_size != 0) styled_frames = []", "FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return", "= FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame())", "video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two", "= int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames", "tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\"", "tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\"", "be same length!') combined_frames = [] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame", "i in range(num_batches): begin = i * batch_size end = min((i + 1)", "size.', default='1') def video_demo(video_path: str, result_path: str, model_path: 
str, image_size: str, batch_size: str):", "np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end -", "str, image_size: str, batch_size: str): image_size = [int(size) for size in image_size.split(',')] batch_size", "] counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames //", "import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from video. :param", "List[np.ndarray]: \"\"\" Load frames from video. :param video_path: path to video. :return: loaded", "file by frames sequence. :param path: path where to store resulting video. :param", "-> List[np.ndarray]: \"\"\" Load frames from video. :param video_path: path to video. :return:", "= transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images", "of frames into one by concatenating them. :param left_frames: left side sequence. 
:param", "@click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch", "typing import List import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames", "for frame in original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames)", "else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images)", "def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of frames", "int(num_batches % batch_size != 0) styled_frames = [] for i in range(num_batches): begin", "numpy as np from typing import List import click def get_frames(video_path: str) ->", "styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path,", "frames into one by concatenating them. :param left_frames: left side sequence. :param right_frames:", "out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin))", "_) = frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height)) for image in", "import FFMPEG_VideoReader from tqdm import tqdm import cv2 from utils.demo_utils import StyleTransferDemo import", "concatenating them. :param left_frames: left side sequence. :param right_frames: right side sequence. 
:return:", "raise ValueError('Sequences of frames must be same length!') combined_frames = [] for left_frame,", "click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from video. :param video_path:", "need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4')", "to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path',", "help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.',", "image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames =", "model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def", "\"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting video", "in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path',", "batch_size != 0) styled_frames = [] for i in range(num_batches): begin = i", "video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences", "0) styled_frames = [] for i in range(num_batches): begin = i * batch_size", "utils.demo_utils import StyleTransferDemo import numpy as np from typing import List import click", "frames def 
generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file by frames sequence.", "\"\"\" (height, width, _) = frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height))", "30, (width, height)) for image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames:", "moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader from tqdm import tqdm import cv2 from utils.demo_utils import StyleTransferDemo", "_ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames:", "import List import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from", "video = cv2.VideoWriter(path, -1, 30, (width, height)) for image in tqdm(frames, desc='Writing video'):", "combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of frames into", "in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]):", "where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output", "must be same length!') combined_frames = [] for left_frame, right_frame in zip(left_frames, right_frames):", "combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video that need to process.', default='../data/videos/Africa.mp4')", "right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of frames into one by", "into one by concatenating them. :param left_frames: left side sequence. 
:param right_frames: right", "to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image", "original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames", "resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images) if __name__ == '__main__': video_demo()", "sequences of frames into one by concatenating them. :param left_frames: left side sequence.", "+= int(num_batches % batch_size != 0) styled_frames = [] for i in range(num_batches):", "len(right_frames): raise ValueError('Sequences of frames must be same length!') combined_frames = [] for", "frames from video. :param video_path: path to video. :return: loaded frames. \"\"\" video_reader", "resulting video. :param frames: frames sequence. \"\"\" (height, width, _) = frames[0].shape video", "image_size[0])) for frame in original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames =", "num_frames = len(original_frames) num_batches = num_frames // batch_size num_batches += int(num_batches % batch_size", "frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames", "tqdm import tqdm import cv2 from utils.demo_utils import StyleTransferDemo import numpy as np", "begin = i * batch_size end = min((i + 1) * batch_size, num_frames)", "(width, height)) for image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray],", "Combine two sequences of frames into one by concatenating them. 
:param left_frames: left", "protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path:", "def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from video. :param video_path: path", "original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames ] counter =", "frames') num_frames = len(original_frames) num_batches = num_frames // batch_size num_batches += int(num_batches %", "two sequences of frames into one by concatenating them. :param left_frames: left side", "side sequence. :param right_frames: right side sequence. :return: concatenated sequence. \"\"\" if len(left_frames)", "1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size !=", "Load frames from video. :param video_path: path to video. :return: loaded frames. \"\"\"", "\"\"\" Generate video file by frames sequence. :param path: path where to store", "frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height)) for image in tqdm(frames, desc='Writing", "model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0]))", "str, batch_size: str): image_size = [int(size) for size in image_size.split(',')] batch_size = int(batch_size)", "[ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in original_frames ] counter = tqdm(original_frames, desc='Processing", "cv2.VideoWriter(path, -1, 30, (width, height)) for image in tqdm(frames, desc='Writing video'): video.write(image) video.release()", "of frames must be same length!') combined_frames = [] for left_frame, right_frame in", "video. :param frames: frames sequence. 
\"\"\" (height, width, _) = frames[0].shape video =", "desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine", "result_path: str, model_path: str, image_size: str, batch_size: str): image_size = [int(size) for size", "as np from typing import List import click def get_frames(video_path: str) -> List[np.ndarray]:", "one by concatenating them. :param left_frames: left side sequence. :param right_frames: right side", "num_batches = num_frames // batch_size num_batches += int(num_batches % batch_size != 0) styled_frames", "in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]:", "StyleTransferDemo import numpy as np from typing import List import click def get_frames(video_path:", ":return: loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for _ in", "frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file by", "file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size',", "counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames ) generate_video_by_frames(result_path, frames=resulting_images) if __name__", "by concatenating them. :param left_frames: left side sequence. 
:param right_frames: right side sequence.", "batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames)", "video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of", ":param right_frames: right side sequence. :return: concatenated sequence. \"\"\" if len(left_frames) != len(right_frames):", "@click.option('--video_path', help='Path to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file", "video_reader = FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'):", "store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.',", "image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path: str, model_path:", "desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate", "frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file", "batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path)", "= [] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1)", "= tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames // batch_size num_batches", "right_frames: right 
side sequence. :return: concatenated sequence. \"\"\" if len(left_frames) != len(right_frames): raise", "sequence. \"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must be same", "store resulting video. :param frames: frames sequence. \"\"\" (height, width, _) = frames[0].shape", "video. :return: loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for _", "width, _) = frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height)) for image", "List import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from video.", "path to video. :return: loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = []", "video file by frames sequence. :param path: path where to store resulting video.", "sequence. \"\"\" (height, width, _) = frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width,", "loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes),", "@click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path: str, model_path: str, image_size: str,", "for size in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn'", ":param left_frames: left side sequence. :param right_frames: right side sequence. 
:return: concatenated sequence.", "zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path", "from utils.demo_utils import StyleTransferDemo import numpy as np from typing import List import", "size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path: str, model_path: str,", "batch_size: str): image_size = [int(size) for size in image_size.split(',')] batch_size = int(batch_size) transformer", "sequence. :param right_frames: right side sequence. :return: concatenated sequence. \"\"\" if len(left_frames) !=", "concatenated sequence. \"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must be", "them. :param left_frames: left side sequence. :param right_frames: right side sequence. :return: concatenated", "right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video that need to", "help='Path to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where", "@click.option('--result_path', help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model", "image_size = [int(size) for size in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo(", "original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames", "!= len(right_frames): raise ValueError('Sequences of frames must be same length!') combined_frames = []", "frames must be same length!') combined_frames = [] for left_frame, right_frame in zip(left_frames,", "cv2.resize(frame, dsize=(image_size[1], image_size[0])) for 
frame in original_frames ] counter = tqdm(original_frames, desc='Processing frames')", "// batch_size num_batches += int(num_batches % batch_size != 0) styled_frames = [] for", "* batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1:", "from video. :param video_path: path to video. :return: loaded frames. \"\"\" video_reader =", "desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames // batch_size num_batches += int(num_batches", "FFMPEG_VideoReader from tqdm import tqdm import cv2 from utils.demo_utils import StyleTransferDemo import numpy", "!= 0) styled_frames = [] for i in range(num_batches): begin = i *", ":param frames: frames sequence. \"\"\" (height, width, _) = frames[0].shape video = cv2.VideoWriter(path,", "counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames // batch_size", "for image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames: List[np.ndarray])", "str, model_path: str, image_size: str, batch_size: str): image_size = [int(size) for size in", "frames: List[np.ndarray]): \"\"\" Generate video file by frames sequence. :param path: path where", "+ 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size", "\"\"\" Combine two sequences of frames into one by concatenating them. :param left_frames:", "frames sequence. 
\"\"\" (height, width, _) = frames[0].shape video = cv2.VideoWriter(path, -1, 30,", "right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to", "combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video", "help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.',", "str, result_path: str, model_path: str, image_size: str, batch_size: str): image_size = [int(size) for", "= [] for i in range(num_batches): begin = i * batch_size end =", "end = min((i + 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames =", ") original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame in", "process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store results.', default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path", "if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames(", "\"\"\" Load frames from video. :param video_path: path to video. :return: loaded frames.", "frame in original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches", "tqdm(original_frames, desc='Processing frames') num_frames = len(original_frames) num_batches = num_frames // batch_size num_batches +=", "return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file by frames", "video. :param video_path: path to video. :return: loaded frames. 
\"\"\" video_reader = FFMPEG_VideoReader(video_path)", "@click.command() @click.option('--video_path', help='Path to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to", "!= 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames", "str) -> List[np.ndarray]: \"\"\" Load frames from video. :param video_path: path to video.", "np from typing import List import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\"", "num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else:", "default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path: str, model_path: str, image_size:", "from tqdm import tqdm import cv2 from utils.demo_utils import StyleTransferDemo import numpy as", "def video_demo(video_path: str, result_path: str, model_path: str, image_size: str, batch_size: str): image_size =", ":return: concatenated sequence. \"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must", "List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of frames into one by concatenating", "left_frames: left side sequence. :param right_frames: right side sequence. :return: concatenated sequence. \"\"\"", "frames: frames sequence. 
\"\"\" (height, width, _) = frames[0].shape video = cv2.VideoWriter(path, -1,", "import StyleTransferDemo import numpy as np from typing import List import click def", "in range(num_batches): begin = i * batch_size end = min((i + 1) *", "batch_size num_batches += int(num_batches % batch_size != 0) styled_frames = [] for i", "import tqdm import cv2 from utils.demo_utils import StyleTransferDemo import numpy as np from", "List[np.ndarray]: \"\"\" Combine two sequences of frames into one by concatenating them. :param", "scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for frame", "min((i + 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames) if", "for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str,", "path where to store resulting video. :param frames: frames sequence. 
\"\"\" (height, width,", "(height, width, _) = frames[0].shape video = cv2.VideoWriter(path, -1, 30, (width, height)) for", "combined_frames = [] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame],", "video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video", "same length!') combined_frames = [] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame =", "dsize=(image_size[1], image_size[0])) for frame in original_frames ] counter = tqdm(original_frames, desc='Processing frames') num_frames", "= [int(size) for size in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path,", "default='../data/videos/Africa_styled.mp4') @click.option('--model_path', help='Path to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size',", "= min((i + 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames = transformer(curr_frames)", "[] for _ in tqdm(range(video_reader.nframes), desc='Getting video frames'): frames.append(video_reader.read_frame()) return frames def generate_video_by_frames(path:", "to model protobuf.', default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1')", ":param video_path: path to video. :return: loaded frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames", "= len(original_frames) num_batches = num_frames // batch_size num_batches += int(num_batches % batch_size !=", "frames sequence. :param path: path where to store resulting video. 
:param frames: frames", "height)) for image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def combine_frames(left_frames: List[np.ndarray], right_frames:", "input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1], image_size[0])) for", "len(original_frames) num_batches = num_frames // batch_size num_batches += int(num_batches % batch_size != 0)", "* batch_size end = min((i + 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end])", "from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader from tqdm import tqdm import cv2 from utils.demo_utils import", "import cv2 from utils.demo_utils import StyleTransferDemo import numpy as np from typing import", "video_demo(video_path: str, result_path: str, model_path: str, image_size: str, batch_size: str): image_size = [int(size)", "np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video that need", "return combined_frames @click.command() @click.option('--video_path', help='Path to video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path',", "Generate video file by frames sequence. :param path: path where to store resulting", "sequence. :return: concatenated sequence. 
\"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of frames", "transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [", "in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames", "default='1') def video_demo(video_path: str, result_path: str, model_path: str, image_size: str, batch_size: str): image_size", "from typing import List import click def get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load", "-1, 30, (width, height)) for image in tqdm(frames, desc='Writing video'): video.write(image) video.release() def", "batch_size end = min((i + 1) * batch_size, num_frames) curr_frames = np.array(original_frames[begin:end]) out_frames", "batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames,", "ValueError('Sequences of frames must be same length!') combined_frames = [] for left_frame, right_frame", "List[np.ndarray]): \"\"\" Generate video file by frames sequence. :param path: path where to", "[int(size) for size in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size,", "% batch_size != 0) styled_frames = [] for i in range(num_batches): begin =", "right side sequence. :return: concatenated sequence. 
\"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences", "List[np.ndarray], right_frames: List[np.ndarray]) -> List[np.ndarray]: \"\"\" Combine two sequences of frames into one", "= StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame,", "length!') combined_frames = [] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame,", "str): image_size = [int(size) for size in image_size.split(',')] batch_size = int(batch_size) transformer =", "= np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command() @click.option('--video_path', help='Path to video that", "size in image_size.split(',')] batch_size = int(batch_size) transformer = StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' )", "[] for i in range(num_batches): begin = i * batch_size end = min((i", "def generate_video_by_frames(path: str, frames: List[np.ndarray]): \"\"\" Generate video file by frames sequence. :param", "side sequence. :return: concatenated sequence. \"\"\" if len(left_frames) != len(right_frames): raise ValueError('Sequences of", "StyleTransferDemo( model_path=model_path, input_shape=image_size, scope='style_transfer_cnn' ) original_frames = get_frames(video_path=video_path) original_frames = [ cv2.resize(frame, dsize=(image_size[1],", "1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images = combine_frames( left_frames=original_frames, right_frames=styled_frames )", "help='Batch size.', default='1') def video_demo(video_path: str, result_path: str, model_path: str, image_size: str, batch_size:", "get_frames(video_path: str) -> List[np.ndarray]: \"\"\" Load frames from video. 
:param video_path: path to", "<reponame>ALEXKIRNAS/tensorflow-fast-style-transfer from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader from tqdm import tqdm import cv2 from utils.demo_utils", "[] for left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame)", "default='../model/optimized_model.pb') @click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str,", "frames. \"\"\" video_reader = FFMPEG_VideoReader(video_path) frames = [] for _ in tqdm(range(video_reader.nframes), desc='Getting", "i * batch_size end = min((i + 1) * batch_size, num_frames) curr_frames =", "transformer(curr_frames) if batch_size != 1: styled_frames.extend(out_frames) else: styled_frames.append(out_frames) counter.update(n=(end - begin)) resulting_images =", "cv2 from utils.demo_utils import StyleTransferDemo import numpy as np from typing import List", "len(left_frames) != len(right_frames): raise ValueError('Sequences of frames must be same length!') combined_frames =", "path: path where to store resulting video. :param frames: frames sequence. \"\"\" (height,", "video that need to process.', default='../data/videos/Africa.mp4') @click.option('--result_path', help='Path to file where to store", "right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames @click.command()", "num_batches += int(num_batches % batch_size != 0) styled_frames = [] for i in", "sequence. :param path: path where to store resulting video. 
:param frames: frames sequence.", "model_path: str, image_size: str, batch_size: str): image_size = [int(size) for size in image_size.split(',')]", "@click.option('--image_size', help='Output image size.', default='360,640') @click.option('--batch_size', help='Batch size.', default='1') def video_demo(video_path: str, result_path:", "left_frame, right_frame in zip(left_frames, right_frames): combined_frame = np.concatenate([left_frame, right_frame], axis=1) combined_frames.append(combined_frame) return combined_frames" ]
[ "return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join(", "service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\",", "email=email): abort(401) if service not in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"]", "+ escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\",", "f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\" <h1>Log Viewer</h1>", "service in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\",", "from Google Cloud Run necessary to access app logs :return: list of services", "not is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services(): abort(404) out = reversed(", "\"</pre>\" def list_services(): \"\"\"Returns the list of services from Google Cloud Run necessary", "__name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not", "import url_for from common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__", "\"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) ) if", "\"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ] if __name__ == \"__main__\":", "get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services(): abort(404) out", "\"\"\" return [ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\", \"services\", 
\"list\",", "import create_oauth_client, get_user, is_staff, login from common.shell_utils import sh from common.url_for import url_for", "\"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if", "for service in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\",", "+ \"</pre>\" def list_services(): \"\"\"Returns the list of services from Google Cloud Run", "loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\",", "is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service", "def create_secret(service): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\",", "list_services(): \"\"\"Returns the list of services from Google Cloud Run necessary to access", "\"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\" <h1>Log", "not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if", "static_url_path=\"\") if __name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index():", "loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True,", "entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\",", "list of services from Google Cloud Run necessary to access app logs :return:", "in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", 
\"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\",", "Cloud Run necessary to access app logs :return: list of services \"\"\" return", "create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"]", "\"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, )", "logs :return: list of services \"\"\" return [ service[\"metadata\"][\"name\"] for service in loads(", ") ) if \"textPayload\" in entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out))", "escape from json import loads from flask import Flask, abort from common.oauth_client import", "if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for", "abort(401) if service not in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] +", "if \"textPayload\" in entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\"", "get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\"", "capture_output=True, ) ) if \"textPayload\" in entry ] ) return \"<pre>\" + \"\\n\".join(map(str,", "from flask import Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff, login from", "\"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) ) if \"textPayload\"", "login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service not in", "in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", 
\"--limit\", \"100\", \"--format\", \"json\",", "\"\"\"Returns the list of services from Google Cloud Run necessary to access app", "sh from common.url_for import url_for from common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\",", "index(): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email):", "\" \" + escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com", "from html import escape from json import loads from flask import Flask, abort", ") return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"):", "] ) return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the", "is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug = True", "def list_services(): \"\"\"Returns the list of services from Google Cloud Run necessary to", "import loads from flask import Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff,", "reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry in loads( sh(", "from common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\":", "[ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\",", "to access app logs :return: list of services \"\"\" return [ service[\"metadata\"][\"name\"] for", "if service not in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] + \"", "app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug = True create_oauth_client(app,", "\"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] 
if", "\"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ] if __name__ == \"__main__\": app.run(debug=True)", "entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns", "out)) + \"</pre>\" def list_services(): \"\"\"Returns the list of services from Google Cloud", "True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login() email =", "from common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils import sh from common.url_for", "for service in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def", "= get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret',", "Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\")", "== \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"):", "login from common.shell_utils import sh from common.url_for import url_for from common.rpc.auth import is_admin", "\"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ] if", "f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login()", "create_secret(service): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email):", "return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service not", "f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", 
\"json\", capture_output=True, ) ) if \"textPayload\" in", "loads from flask import Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff, login", "\"textPayload\" in entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def", "necessary to access app logs :return: list of services \"\"\" return [ service[\"metadata\"][\"name\"]", "\"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) )", "if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401)", "app logs :return: list of services \"\"\" return [ service[\"metadata\"][\"name\"] for service in", "\"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return", "= reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry in loads(", "services \"\"\" return [ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\", \"services\",", "service in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service):", "entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\",", "from json import loads from flask import Flask, abort from common.oauth_client import create_oauth_client,", "return [ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\",", "email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services():", "Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils import sh", "not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", 
email=email): abort(401) service_list", "\"100\", \"--format\", \"json\", capture_output=True, ) ) if \"textPayload\" in entry ] ) return", "\"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the list of services from Google", "if __name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if", "common.shell_utils import sh from common.url_for import url_for from common.rpc.auth import is_admin app =", "return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the list of", "list of services \"\"\" return [ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\",", "AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) ) if \"textPayload\" in entry", "from common.shell_utils import sh from common.url_for import url_for from common.rpc.auth import is_admin app", "Run necessary to access app logs :return: list of services \"\"\" return [", "\"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ] if __name__ ==", "is_staff, login from common.shell_utils import sh from common.url_for import url_for from common.rpc.auth import", "service not in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] + \" \"", "+ \" \" + escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\", \"read\",", "is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in", "common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug", "\"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ]", "the list of services from Google Cloud Run necessary to access app logs", 
"\" + escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND", "@app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not", "flask import Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils", "login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p", ") return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the list", "is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list =", "escape(entry[\"textPayload\"]) for entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\",", "href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\"", "\"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) ) if \"textPayload\" in entry ] )", "return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return", "not in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] + \" \" +", "not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service", "\"--format\", \"json\", capture_output=True, ) ) if \"textPayload\" in entry ] ) return \"<pre>\"", "email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services()", "url_for from common.rpc.auth import is_admin app = 
Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ ==", "\"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) )", "import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug =", "html import escape from json import loads from flask import Flask, abort from", ") if \"textPayload\" in entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out)) +", "list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not", "in entry ] ) return \"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services():", "Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login() email =", "{service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"]", "[ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\",", "import sh from common.url_for import url_for from common.rpc.auth import is_admin app = Flask(__name__,", "= \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\"", "in list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"])", "<h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login() email", "Google Cloud Run necessary to access app logs :return: list of services \"\"\"", "of services \"\"\" return [ service[\"metadata\"][\"name\"] for service in loads( sh( \"gcloud\", \"run\",", "abort from 
common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils import sh from", "from common.url_for import url_for from common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\")", "def index(): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not is_admin(course=\"cs61a\",", "if not is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services(): abort(404) out =", "is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services(): abort(404) out = reversed( [", ":return: list of services \"\"\" return [ service[\"metadata\"][\"name\"] for service in loads( sh(", "static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def", "= get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) if service not in list_services(): abort(404)", "\"json\", capture_output=True, ) ) if \"textPayload\" in entry ] ) return \"<pre>\" +", "app.debug = True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login()", "resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, ) ) if \"textPayload\" in entry ]", "import escape from json import loads from flask import Flask, abort from common.oauth_client", "for entry in loads( sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\",", "abort(404) out = reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry", "access app logs :return: list of services \"\"\" return [ service[\"metadata\"][\"name\"] for service", "json import loads from flask import Flask, abort from common.oauth_client import create_oauth_client, get_user,", "common.url_for import url_for from 
common.rpc.auth import is_admin app = Flask(__name__, static_folder=\"\", static_url_path=\"\") if", "@app.route(\"/service/<service>\") def create_secret(service): if not is_staff(\"cs61a\"): return login() email = get_user()[\"email\"] if not", "of services from Google Cloud Run necessary to access app logs :return: list", "\"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True, ) ) ] if __name__", "get_user, is_staff, login from common.shell_utils import sh from common.url_for import url_for from common.rpc.auth", "= Flask(__name__, static_folder=\"\", static_url_path=\"\") if __name__ == \"__main__\": app.debug = True create_oauth_client(app, \"61a-logs\")", "create_oauth_client, get_user, is_staff, login from common.shell_utils import sh from common.url_for import url_for from", "in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\") def create_secret(service): if", "list_services(): abort(404) out = reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for", "abort(401) service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() )", "email = get_user()[\"email\"] if not is_admin(course=\"cs61a\", email=email): abort(401) service_list = \"\\n\".join( f\"<p /><a", "service_list = \"\\n\".join( f\"<p /><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return", "services from Google Cloud Run necessary to access app logs :return: list of", "\"<pre>\" + \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the list of services", "out = reversed( [ entry[\"timestamp\"] + \" \" + escape(entry[\"textPayload\"]) for entry in", "sh( \"gcloud\", \"run\", \"services\", \"list\", \"--platform\", \"managed\", \"--region\", \"us-west1\", \"--format\", \"json\", \"-q\", capture_output=True,", "= 
True create_oauth_client(app, \"61a-logs\") @app.route(\"/\") def index(): if not is_staff(\"cs61a\"): return login() email", "common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils import sh from common.url_for import", "/><a href={url_for('create_secret', service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list}", "+ \"\\n\".join(map(str, out)) + \"</pre>\" def list_services(): \"\"\"Returns the list of services from", "service=service)}>{service}</a>\" for service in list_services() ) return f\"\"\" <h1>Log Viewer</h1> {service_list} \"\"\" @app.route(\"/service/<service>\")", "<reponame>akshitdewan/cs61a-apps from html import escape from json import loads from flask import Flask,", "sh( \"gcloud\", \"logging\", \"read\", f\"projects/cs61a-140900/logs/run.googleapis.com AND resource.labels.service_name={service}\", \"--limit\", \"100\", \"--format\", \"json\", capture_output=True, )", "import Flask, abort from common.oauth_client import create_oauth_client, get_user, is_staff, login from common.shell_utils import" ]
[ "FocalLoss from data import CaseDataset from torchvision.transforms import Compose from transform import Crop,", "Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1,", "loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191],", "import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148,", "Dice, HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms import Compose from", "ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer(", "# %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800, use_amp=True, save_dir=save_dir", "= ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform", "191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0,", "scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir", "RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform =", "lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148,", "torchvision.transforms import Compose from transform import 
Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast,", "import CaseDataset from torchvision.transforms import Compose from transform import Crop, RandomCrop, ToTensor, CombineLabels,", "from data import CaseDataset from torchvision.transforms import Compose from transform import Crop, RandomCrop,", "from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model", "train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor()", "'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size", "RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler", "= {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0,", "factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform = Compose([", "ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss,", "import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror", "RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime", "trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0,", 
"ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') #", "import Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice,", "model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) #", "from transform import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale,", "crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size),", "ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics", "128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1),", "import Compose from transform import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma,", "191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2,", "= Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200,", "148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc':", "Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform", "metrics=metrics, dataset=dataset, scheduler=scheduler, 
train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir =", "1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset =", "from torchvision.transforms import Compose from transform import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness,", "Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset", "HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1),", "from trainer import Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss", "= CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'),", "'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler", "train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir =", "data import CaseDataset from torchvision.transforms import Compose from transform import Crop, RandomCrop, ToTensor,", "Compose from transform import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\", "torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset,", "%% from trainer import Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from", "{'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': 
FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1,", "import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from", "RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt =", "ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from", "from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss =", "DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms import Compose from transform import", "loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms import", "optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %%", "from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms", "= \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800, use_amp=True, save_dir=save_dir ) # %%", "= ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1)", "datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1,", "= Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict'])", "ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data import 
CaseDataset", "torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model =", "CaseDataset from torchvision.transforms import Compose from transform import Crop, RandomCrop, ToTensor, CombineLabels, \\", "= Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ])", "Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from", "= HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9,", "# %% from trainer import Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D", ") # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800, use_amp=True,", "from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss,", "alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0,", "scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128)", "0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm')", "from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer =", "patch_size = (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5,", "= torch.load('logs/Task20_Kidney/av-loss-last.pt') # 
model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics,", "import ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4)", "model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9,", "patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([", "1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128,", "RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim", "alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148,", "HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms import Compose from transform", "RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime", "trainer import Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import", "\\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from", "128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1),", "%% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' 
trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800, use_amp=True, save_dir=save_dir )", "CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam", "DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]),", "(128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1),", "valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) #", "optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2,", "RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import", "148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer,", "num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800,", "FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler =", "RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau", "ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss", "RandomCrop(patch_size), ToTensor() ]) # ckpt = 
torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer =", "ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data", "Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer", "148, 191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal':", "0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) #", "0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128,", "'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25)", "RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from torch.optim.lr_scheduler import", "Trainer from network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss,", "RandomMirror((0.5, 0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor()", "= Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics = {'dsc':", "loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir", "save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' 
trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit( num_epochs=800, use_amp=True, save_dir=save_dir ) #", "]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model,", "datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(), lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191],", "# optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform,", "191], alpha=0.9, beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1,", "beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc': Dice(weight_v=[0, 1, 0]), 'v_dsc': Dice(weight_v=[0, 0, 1])}", "metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]), 'a_dsc':", "Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, )", "model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler, train_transform=train_transform,", "ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import", "# ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer,", "dataset=dataset, 
scheduler=scheduler, train_transform=train_transform, valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\"))", "]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt') # model.load_state_dict(ckpt['model_state_dict'])", "transform import Crop, RandomCrop, ToTensor, CombineLabels, \\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop,", "import Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda()", "patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1,", "valid_transform=valid_transform, batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257'", "CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5,", "network import ResUnet3D, ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss", "= (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size, crop_mode='random'), RandomMirror((0.5, 0.5, 0.5)),", "ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform =", "import Dice, HybirdLoss, DiceLoss, FocalLoss from data import CaseDataset from torchvision.transforms import Compose", "Adam from torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer", "optimizer = Adam(model.parameters(), 
lr=1e-4) loss = HybirdLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1) metrics =", "Dice(weight_v=[0, 0, 1])} scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=25) dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size =", "RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt", "# model.load_state_dict(ckpt['model_state_dict']) # optimizer.load_state_dict(ckpt['optimizer_state_dict']) trainer = Trainer( model=model, optimizer=optimizer, loss=loss, metrics=metrics, dataset=dataset, scheduler=scheduler,", "valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt') trainer.fit(", "batch_size=2, valid_split=0.0, num_samples=200, ) # %% save_dir = \"logs/DOC/iib-H-09-{}\".format(datetime.now().strftime(\"%y%m%d%H%M\")) save_dir = 'logs/DOC/iib-H-09-2006150257' trainer.load_checkpoint('logs/DOC/iib-H-09-2006150257-last.pt')", "ResAttrUnet3D, ResAttrUnet3D2, ResAttrBNUnet3D from loss import Dice, HybirdLoss, DiceLoss, FocalLoss from data import", "torch.optim.lr_scheduler import ReduceLROnPlateau from datetime import datetime model = ResUnet3D(out_channels=3).cuda() optimizer = Adam(model.parameters(),", "\\ RandomBrightness, RandomContrast, RandomGamma, \\ RandomRescale, RandomRescaleCrop, RandomMirror from torch.optim import Adam from", "dataset = CaseDataset('data/Task20_Kidney/vessel_region_norm') patch_size = (128, 128, 128) train_transform = Compose([ RandomRescaleCrop(0.1, patch_size,", "0.5, 0.5)), RandomContrast(0.1), RandomBrightness(0.1), RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ])", "beta=0.1) metrics = {'dsc': DiceLoss(weight_v=[1, 148, 191], alpha=0.9, beta=0.1), 'focal': FocalLoss(weight_v=[1, 148, 191]),", 
"RandomGamma(0.1), ToTensor() ]) valid_transform = Compose([ RandomCrop(patch_size), ToTensor() ]) # ckpt = torch.load('logs/Task20_Kidney/av-loss-last.pt')" ]
[ "string \"{string}\"') def step_a_string(context, string): context.first = string @when('we compare it to itself')", "distance): assert context.distance == distance @given('the first string \"{first}\" and the second string", "<filename>features/steps/levenshtein_steps.py import random from behave import given, when, then from howabout import get_levenshtein", "empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = '' @given('a string", "compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not overflow')", "overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context,", "= string @when('we compare it to itself') def step_compare_string_to_itself(context): string = context.first, context.first", ":type first str :type second str :type prefix str \"\"\" context.first = first", "when, then from howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet =", "for _ in range(0, size)] context.first = random_str(1024) context.second = random_str(1024) @given('two empty", "context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance", "and the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = ''", "first string \"{first}\" and the second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context,", "step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _ in range(0,", "string): context.first = string @when('we compare it to itself') def step_compare_string_to_itself(context): string =", "= '' @when('we compare them') def 
step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter", "'' @when('we compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should", "it to itself') def step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string, string)", "string) @then('the distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance == distance @given('the", "in range(0, size)] context.first = random_str(1024) context.second = random_str(1024) @given('two empty strings') def", "\"\"\" :type context behave.runner.Context :type first str :type second str :type prefix str", "then from howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz'", "string): context.first = string context.second = '' @given('a string \"{string}\"') def step_a_string(context, string):", "[random.choice(alphabet) for _ in range(0, size)] context.first = random_str(1024) context.second = random_str(1024) @given('two", "with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type context behave.runner.Context :type first", "= 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _ in range(0, size)] context.first", "context.first context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context, distance): assert", "\"{string}\"') def step_a_string(context, string): context.first = string @when('we compare it to itself') def", "context.second = random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first = '' context.second =", "str :type second str :type prefix str \"\"\" context.first = first context.second =", "string @when('we compare it to itself') def 
step_compare_string_to_itself(context): string = context.first, context.first context.distance", "_ in range(0, size)] context.first = random_str(1024) context.second = random_str(1024) @given('two empty strings')", "second, prefix): \"\"\" :type context behave.runner.Context :type first str :type second str :type", "random from behave import given, when, then from howabout import get_levenshtein @given('two long", "assert not context.failed @given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first =", "strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _", "def step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance is", "size: [random.choice(alphabet) for _ in range(0, size)] context.first = random_str(1024) context.second = random_str(1024)", "not context.failed @given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string", "the second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\"", "howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str =", "@then('the distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance == distance @given('the first", "context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and", "step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context): assert", "step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty 
string') def step_a_string_and_the_emtpy_string(context, string): context.first", "= string context.second = '' @given('a string \"{string}\"') def step_a_string(context, string): context.first =", "context.first = random_str(1024) context.second = random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first =", "= random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first = '' context.second = ''", "string = context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}') def", "@given('a string \"{string}\"') def step_a_string(context, string): context.first = string @when('we compare it to", "long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for", "\"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type context behave.runner.Context", "import random from behave import given, when, then from howabout import get_levenshtein @given('two", "should not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty string')", "step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = '' @given('a string \"{string}\"') def step_a_string(context,", "@then('the interpreter should not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the", "context.distance == distance @given('the first string \"{first}\" and the second string \"{second}\" starting", "get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\"", "get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size:", "string 
\"{first}\" and the second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first,", "@given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second =", "them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def", "context.first = string context.second = '' @given('a string \"{string}\"') def step_a_string(context, string): context.first", "context.failed @given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second", "and the second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second, prefix):", "def step_two_empty_strings(context): context.first = '' context.second = '' @when('we compare them') def step_compare_two_strings(context):", "starting with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type context behave.runner.Context :type", "the empty string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = '' @given('a", "@when('we compare it to itself') def step_compare_string_to_itself(context): string = context.first, context.first context.distance =", "\"{first}\" and the second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second,", "prefix): \"\"\" :type context behave.runner.Context :type first str :type second str :type prefix", "random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first = '' context.second = '' @when('we", "step_a_string(context, string): context.first = string @when('we compare it to itself') def step_compare_string_to_itself(context): string", "distance @given('the first string \"{first}\" and the second string \"{second}\" starting with \"{prefix}\"')", "behave import given, when, then 
from howabout import get_levenshtein @given('two long strings') def", "context.second = '' @when('we compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the", "def step_assert_distance(context, distance): assert context.distance == distance @given('the first string \"{first}\" and the", ":type second str :type prefix str \"\"\" context.first = first context.second = second", "second string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type", "def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty string') def step_a_string_and_the_emtpy_string(context, string):", "= random_str(1024) context.second = random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first = ''", "empty strings') def step_two_empty_strings(context): context.first = '' context.second = '' @when('we compare them')", "alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _ in range(0, size)]", "= get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context): assert not context.failed", "= context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context,", "first, second, prefix): \"\"\" :type context behave.runner.Context :type first str :type second str", "not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty string') def", "import given, when, then from howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context):", "def step_impl2(context, first, second, prefix): \"\"\" :type context behave.runner.Context :type first str :type", "from behave import given, when, then from howabout import get_levenshtein 
@given('two long strings')", "string') def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = '' @given('a string \"{string}\"')", "@when('we compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not", "string context.second = '' @given('a string \"{string}\"') def step_a_string(context, string): context.first = string", "context.second = '' @given('a string \"{string}\"') def step_a_string(context, string): context.first = string @when('we", "= '' context.second = '' @when('we compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first,", "import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda", "to itself') def step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string, string) @then('the", "context behave.runner.Context :type first str :type second str :type prefix str \"\"\" context.first", "== distance @given('the first string \"{first}\" and the second string \"{second}\" starting with", "= '' @given('a string \"{string}\"') def step_a_string(context, string): context.first = string @when('we compare", "@given('the first string \"{first}\" and the second string \"{second}\" starting with \"{prefix}\"') def", ":type context behave.runner.Context :type first str :type second str :type prefix str \"\"\"", "'' @given('a string \"{string}\"') def step_a_string(context, string): context.first = string @when('we compare it", "get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance == distance", "assert context.distance == distance @given('the first string \"{first}\" and the second string \"{second}\"", "step_impl2(context, first, second, prefix): \"\"\" 
:type context behave.runner.Context :type first str :type second", "= get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance ==", "lambda size: [random.choice(alphabet) for _ in range(0, size)] context.first = random_str(1024) context.second =", "given, when, then from howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet", "@given('two empty strings') def step_two_empty_strings(context): context.first = '' context.second = '' @when('we compare", "def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context):", "compare it to itself') def step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string,", "itself') def step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance", "@given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet)", "step_two_empty_strings(context): context.first = '' context.second = '' @when('we compare them') def step_compare_two_strings(context): context.distance", "context.distance = get_levenshtein(context.first, context.second) @then('the interpreter should not overflow') def step_assert_no_overflow(context): assert not", "size)] context.first = random_str(1024) context.second = random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first", "def step_a_string_and_the_emtpy_string(context, string): context.first = string context.second = '' @given('a string \"{string}\"') def", "range(0, size)] context.first = random_str(1024) context.second = random_str(1024) @given('two empty strings') def 
step_two_empty_strings(context):", "'' context.second = '' @when('we compare them') def step_compare_two_strings(context): context.distance = get_levenshtein(context.first, context.second)", "context.first = string @when('we compare it to itself') def step_compare_string_to_itself(context): string = context.first,", "behave.runner.Context :type first str :type second str :type prefix str \"\"\" context.first =", "def step_a_string(context, string): context.first = string @when('we compare it to itself') def step_compare_string_to_itself(context):", "context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}') def step_assert_distance(context, distance):", "step_compare_string_to_itself(context): string = context.first, context.first context.distance = get_levenshtein(string, string) @then('the distance is {distance:d}')", "string \"{second}\" starting with \"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type context", "from howabout import get_levenshtein @given('two long strings') def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str", "interpreter should not overflow') def step_assert_no_overflow(context): assert not context.failed @given('\"{string}\" and the empty", "distance is {distance:d}') def step_assert_distance(context, distance): assert context.distance == distance @given('the first string", "{distance:d}') def step_assert_distance(context, distance): assert context.distance == distance @given('the first string \"{first}\" and", "strings') def step_two_empty_strings(context): context.first = '' context.second = '' @when('we compare them') def", "\"{prefix}\"') def step_impl2(context, first, second, prefix): \"\"\" :type context behave.runner.Context :type first str", "step_assert_distance(context, distance): assert context.distance == distance @given('the first string \"{first}\" and the second", "first str :type second str :type prefix str 
\"\"\" context.first = first context.second", "random_str(1024) context.second = random_str(1024) @given('two empty strings') def step_two_empty_strings(context): context.first = '' context.second", "= lambda size: [random.choice(alphabet) for _ in range(0, size)] context.first = random_str(1024) context.second", "is {distance:d}') def step_assert_distance(context, distance): assert context.distance == distance @given('the first string \"{first}\"", "def step_two_long_strings(context): alphabet = 'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _ in", "context.first = '' context.second = '' @when('we compare them') def step_compare_two_strings(context): context.distance =", "random_str = lambda size: [random.choice(alphabet) for _ in range(0, size)] context.first = random_str(1024)", "'abcdefghijklmnopqrstuvwxyz' random_str = lambda size: [random.choice(alphabet) for _ in range(0, size)] context.first =" ]
[ "### DO NOT CHANGE THIS FILE ### ### The code is auto generated,", "if status_code is not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401)", "code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message = message self.errors =", "UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException): pass @add_status_code(500) class ServerError(JSONException):", "### ### DO NOT CHANGE THIS FILE ### ### The code is auto", "def add_status_code(code): \"\"\" Decorator used for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls):", "import SanicException def add_status_code(code): \"\"\" Decorator used for adding exceptions to _sanic_exceptions. \"\"\"", "code self.message = message self.errors = errors if status_code is not None: self.status_code", "not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass", "### from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used for adding exceptions", "= errors if status_code is not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException):", "status_code=None): super().__init__(message) self.error_code = code self.message = message self.errors = errors if status_code", "None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403)", "errors if status_code is not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass", "DO NOT CHANGE THIS FILE ### ### The code is auto generated, your", "to _sanic_exceptions. 
\"\"\" def class_decorator(cls): cls.status_code = code return cls return class_decorator class", "code return cls return class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None):", "generating. ### from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used for adding", "be overwritten by ### code generating. ### from sanic.exceptions import SanicException def add_status_code(code):", "for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code = code return cls", "class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException): pass @add_status_code(500) class", "return class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code =", "self.message = message self.errors = errors if status_code is not None: self.status_code =", "FILE ### ### The code is auto generated, your change will be overwritten", "pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException): pass @add_status_code(500) class ServerError(JSONException): pass", "generated, your change will be overwritten by ### code generating. ### from sanic.exceptions", "Decorator used for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code = code", "auto generated, your change will be overwritten by ### code generating. ### from", "-*- coding: utf-8 -*- ### ### DO NOT CHANGE THIS FILE ### ###", "def class_decorator(cls): cls.status_code = code return cls return class_decorator class JSONException(SanicException): def __init__(self,", "### code generating. 
### from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used", "status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException): pass", "from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used for adding exceptions to", "THIS FILE ### ### The code is auto generated, your change will be", "class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code", "def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message = message", "### The code is auto generated, your change will be overwritten by ###", "change will be overwritten by ### code generating. ### from sanic.exceptions import SanicException", "errors=None, status_code=None): super().__init__(message) self.error_code = code self.message = message self.errors = errors if", "add_status_code(code): \"\"\" Decorator used for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code", "overwritten by ### code generating. ### from sanic.exceptions import SanicException def add_status_code(code): \"\"\"", "used for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code = code return", "### ### The code is auto generated, your change will be overwritten by", "self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class", "\"\"\" def class_decorator(cls): cls.status_code = code return cls return class_decorator class JSONException(SanicException): def", "code generating. 
### from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used for", "@add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException): pass @add_status_code(500)", "NOT CHANGE THIS FILE ### ### The code is auto generated, your change", "adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code = code return cls return", "__init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message = message self.errors", "will be overwritten by ### code generating. ### from sanic.exceptions import SanicException def", "= status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException): pass @add_status_code(403) class Forbidden(JSONException):", "class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message", "# -*- coding: utf-8 -*- ### ### DO NOT CHANGE THIS FILE ###", "JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message =", "sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator used for adding exceptions to _sanic_exceptions.", "coding: utf-8 -*- ### ### DO NOT CHANGE THIS FILE ### ### The", "= code return cls return class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None,", "self.error_code = code self.message = message self.errors = errors if status_code is not", "by ### code generating. 
### from sanic.exceptions import SanicException def add_status_code(code): \"\"\" Decorator", "class_decorator(cls): cls.status_code = code return cls return class_decorator class JSONException(SanicException): def __init__(self, code,", "<reponame>gusibi/Metis # -*- coding: utf-8 -*- ### ### DO NOT CHANGE THIS FILE", "\"\"\" Decorator used for adding exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code =", "CHANGE THIS FILE ### ### The code is auto generated, your change will", "message self.errors = errors if status_code is not None: self.status_code = status_code @add_status_code(422)", "is auto generated, your change will be overwritten by ### code generating. ###", "cls return class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message) self.error_code", "super().__init__(message) self.error_code = code self.message = message self.errors = errors if status_code is", "-*- ### ### DO NOT CHANGE THIS FILE ### ### The code is", "cls.status_code = code return cls return class_decorator class JSONException(SanicException): def __init__(self, code, message=None,", "status_code is not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class", "= message self.errors = errors if status_code is not None: self.status_code = status_code", "SanicException def add_status_code(code): \"\"\" Decorator used for adding exceptions to _sanic_exceptions. \"\"\" def", "self.errors = errors if status_code is not None: self.status_code = status_code @add_status_code(422) class", "The code is auto generated, your change will be overwritten by ### code", "_sanic_exceptions. 
\"\"\" def class_decorator(cls): cls.status_code = code return cls return class_decorator class JSONException(SanicException):", "message=None, errors=None, status_code=None): super().__init__(message) self.error_code = code self.message = message self.errors = errors", "return cls return class_decorator class JSONException(SanicException): def __init__(self, code, message=None, errors=None, status_code=None): super().__init__(message)", "your change will be overwritten by ### code generating. ### from sanic.exceptions import", "exceptions to _sanic_exceptions. \"\"\" def class_decorator(cls): cls.status_code = code return cls return class_decorator", "code is auto generated, your change will be overwritten by ### code generating.", "is not None: self.status_code = status_code @add_status_code(422) class UnprocessableEntity(JSONException): pass @add_status_code(401) class Unauthorized(JSONException):", "= code self.message = message self.errors = errors if status_code is not None:", "utf-8 -*- ### ### DO NOT CHANGE THIS FILE ### ### The code" ]
[ "``(3, 3, 3, 3)`` .. note:: The three angles (``alpha``, ``beta``, ``gam``) correspond", "= a0**2 b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif", "(np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)`` alpha (float): Angle in", "pl)*np.pi/180. # Get tensor with horizontal axis # Minerals if typ=='atg': C, rho", "not implemented') return # Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho #", "def voigt2cc(C): \"\"\" Convert the Voigt representation of the stiffness matrix to the", "if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si':", "4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) #", "6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\" C", "\\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for m in range(3): for", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr, tft]) # Return stream", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data =", "trV = trR.copy() trH = trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp -", "(obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing:", "sequence of the rotation is important: (AB ~= BA). 
In this case we", "populate lists try: open(modfile) except: raise(Exception('model file cannot be opened: ',modfile)) with open(modfile)", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data", "as np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer", "= np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi =", "Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S wave velocities", "\"\"\" Checks whether or not all required global variables are set and throws", "if obs: # If OBS, then further pass OBS-related paramters to Fortran conf", "``obspy`` ``Stream`` object. trxyz = get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy,", "Returns: (obspy.stream): tfs: Stream containing Radial and Transverse transfer functions \"\"\" # Extract", "wave mode. Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component trT (obspy.trace):", "'nt', 'slow', 'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof] check = [f", "calculation. .. note:: The ``conf`` global variables need to be set for this", "used for rotation Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional (P) wave", "eta = FF/(AA - 2.*LL) # Get tensor with horizontal axis cc =", "main ``telewavesim.rmat_f.plane_****`` function. 
Args: fortran (book, option): Whether or not the Fortran modules", "= es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C, rho =", "np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) #", "an Exception and stop check_cf(obs) # Pass variables to Fortran conf model2for() wave2for()", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to generate tensor for transverse", "to generate transfer functions from displacement traces. Args: trxyz (obspy.stream): Obspy ``Stream`` object", "= rho else: print('\\nFlag not defined: use either \"iso\", \"tri\" or one among\\n')", "trV.data = vec[1,:] trH.data = -trT.data/2. return trP, trV, trH def stack_all(st1, st2,", "cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model", "Number of samples dt (float): Sampling rate slow (float): Slowness value (s/km) baz", "option): Whether or not the Fortran modules are used obs (bool, optional): Whether", "es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C, rho = es.jadeite()", "(np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3,", "'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst = [cf.dp, cf.c,", "Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) #", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data =", "Pass variables to the `conf` module >>> # Only topmost layer is useful", "to do so, subject to the following conditions: # The above copyright notice", "of the stiffness matrix to the full 3x3x3x3 tensor representation. 
Args: C (np.ndarray):", "\"\"\" Function to generate tensor for transverse isotropy. The tensor is rotated using", "typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else: print('type", "in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum", "(km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density)", "elif typ=='zo': C, rho = es.zoisite() # Rocks elif typ=='BS_f': C, rho =", "3, 3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j,", "j in range(6): k, l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] =", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV':", "aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0]", "= trZ.copy() trV = trR.copy() trH = trT.copy() # Vertical slownesses qp =", "for tr in st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag,", "isotropic material. 
Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s)", "Return stream return tfs def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the", "C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C,", "cc[i, j, k, l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert", "typ=='bt': C, rho = es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol':", "or one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or", "Kreuss (float): Reuss average bulk modulus (GPa) * Greuss (float): Reuss average shear", "to permit persons to whom the Software is # furnished to do so,", "stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time", "containing: * Vp (float): P-wave velocity (m/s) * Vs (float): S-wave velocity (m/s)", "Extract P and SV components trP.data = vec[0,:] trV.data = vec[1,:] trH.data =", "(b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF =", "vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ", "plunge of symmetry axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. 
#", "Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv))))", "Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as cf from", "= trxyz[1] ztr = trxyz[2] baz = cf.baz # Copy to radial and", "bulk modulus (GPa) * Greuss (float): Reuss average shear modulus (GPa) * Kvrh", "S-wave velocity (m/s) Example ------- >>> from telewavesim import utils >>> cc, rho", "Vertically polarized shear (SV) wave mode * trH (obspy.trace): Horizontally polarized shear (SH)", "rho else: print('\\nFlag not defined: use either \"iso\", \"tri\" or one among\\n') print(mins,rocks)", "j, k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j", "all other modules \"\"\" h = []; r = []; a = [];", "tensor for isotropic material. Args: a (float): P-wave velocity (km/s) b (float): S-wave", "range(6): k, l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return", "\"\"\" # Check if all variables are set. 
If not, throw an Exception", "(np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\"", "cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data", "[(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] tol", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return", "C, rho = es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif typ=='lws': C,", "either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\"", "(float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3,", "hilbert from obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import", "set. 
If not, throw an Exception and stop check_cf(obs) # Pass variables to", "tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions from", "C, rho = es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif typ=='ol': C,", "(float): Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>> from telewavesim import utils", "mode * trH (obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\" # Copy", "for ``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms", "trxyz: Stream containing 3-component displacement seismograms \"\"\" # Get displacements in time domain", "Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1), (2,", "conf as cf from telewavesim import elast as es from telewavesim.rmat_f import conf", "for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i,", "h cf.rho = r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex)", "value (s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with updated", "cf.c, cf.rhof] check = [f is None for f in lst] if sum(check)/len(check)>0.:", "asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from", "Returns: (tuple): tuple containing: * Vp (float): P-wave velocity (m/s) * Vs (float):", "# Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial", "weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into traces stack1 =", "typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif typ=='zo':", "def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to", "For trend and plunge of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta``", "modulus (GPa) G (float): Shear modulus (GPa) 
rho (float): Density (kg/m^3) Returns: (tuple):", "# Trend and plunge of symmetry axis tr = -tr*np.pi/180. pl = (90.", "Angle in radians gam (float): Angle in radians Returns: (np.ndarray): aa: Rotated tensor", "Args: tr (obspy.trace): Trace object to update nt (int): Number of samples dt", "or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile):", "return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time through model. The", "Returns: (np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3, 3)`` .. note::", "trT (obspy.trace): Transverse component vp (float, optional): P-wave velocity used for rotation vs", "cc = rot_tensor(cc, pl, tr, 0.) # Return tensor return cc def set_aniso_tensor(tr,", "as fileobj: for line in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3)", "= utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance", "module. Returns: None Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx", "cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag not defined: use either \"iso\",", "\"\"\" Calculates total propagation time through model. The bottom layer is irrelevant in", "import elast as es from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import", "conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst", "pvh=False): \"\"\" Function to generate transfer functions from displacement traces. 
Args: trxyz (obspy.stream):", "for tr in st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag,", "cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag not", "up matrix elements AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if", "for i in range(6): for j in range(6): k, l = Voigt_notation[i] m,", "trxyz: Stream containing 3-component displacement seismograms \"\"\" # Check if all variables are", "1)] tol = 1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for i in", "3)`` .. note:: The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about", "1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss =", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "3, 3, 3)``) * rho (float): Density (kg/m^3) \"\"\" # Trend and plunge", "rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH`", "C, rho = es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C,", "line in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3]))", "= set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins or fl[j] in rocks:", "(float): Angle in radians Returns: (np.ndarray): aa: Rotated tensor with shape ``(3, 3,", "Voigt averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2]", "displacement seismograms into ``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream`` object. 
Args:", "vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data))", "module and return 3-component seismograms as an ``obspy`` ``Stream`` object. .. note:: The", "np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex)", "Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor cc (c_ijkl)", "set and throws an Exception if not. Args: obs (bool, optional): Whether the", "cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes", "(str, optional): Type of elastic material Returns: (tuple): Tuple containing: * cc (np.ndarray):", ">>> conf.thickn[0] = 10. >>> conf.wvtype = 'P' >>> slow = 0.06 #", "ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data =", "rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1", "``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof", "(obspy.trace): Stacked trace for Stream 2 \"\"\" print() print('Stacking ALL traces in streams')", "= np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux", "slow (float): Slowness value (s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr:", "ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data =", "st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple", "trV (obspy.trace): Vertically polarized shear (SV) wave mode * trH (obspy.trace): Horizontally polarized", "for an OBS case or not. 
:raises ExceptionError: Throws ExceptionError if not all", "= np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into", "for n in range(3): for r in range(3): for s in range(3): asum=0.0", "typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80':", "(shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``)", "= es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif typ=='zo': C, rho =", "= np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i]", "global wavefield variables to Fortran ``conf`` module. Returns: None Variables to pass are", "i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i]", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "cf.baz] check = [f is None for f in lst] if sum(check)/len(check)>0.: raise", "------- >>> from telewavesim import conf >>> from telewavesim import utils >>> import", "cf.slow, cf.baz] check = [f is None for f in lst] if sum(check)/len(check)>0.:", "ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer", "* rho (float): Density (kg/m^3) \"\"\" # Trend and plunge of symmetry axis", "cf.rho[j] = rho else: print('\\nFlag not defined: use either \"iso\", \"tri\" or one", "vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data =", "cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append", "np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor", "elif fl[j] in mins or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j]", "'dt', 'nt', 'slow', 'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof] check 
=", "c_mnrs # for m in range(3): for n in range(3): for r in", "axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density)", "Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S wave velocities from", "Only topmost layer is useful for travel time calculation >>> conf.isoflg = ['atg']", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] + C[1,1] +", "(G) moduli and density (rho) in kg/m^3 Args: K (float): Bulk modulus (GPa)", "software and associated documentation files (the \"Software\"), to deal # in the Software", "l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from the full", "np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': #", "np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data)", "in range(6): for j in range(6): k, l = Voigt_notation[i] m, n =", "Reuss average bulk modulus (GPa) * Greuss (float): Reuss average shear modulus (GPa)", "cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins or fl[j] in", "str_stats = st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1", "file from the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow (float):", "conditions: # The above copyright notice and this permission notice shall be included", "cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0 =", "obs (bool, optional): Whether the analysis is done for an OBS case or", "etr.copy() # Rotate to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz)", 
"Fortran ``conf`` module. Returns: None Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg``", "and to permit persons to whom the Software is # furnished to do", "module ``conf``. Returns: None: Parameters are now global variables shared between all other", "28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C) # Voigt averaging", "tensor for anisotropic minerals. The \\ tensor is rotated using the trend and", "= b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV'", "the Software, and to permit persons to whom the Software is # furnished", "of symmetry axis (degree) pl (float): Plunge angle of symmetry axis (degree) ani", "hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in", "Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked trace for Stream", ">>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho)", "= line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration", "* Vp (float): P-wave velocity (m/s) * Vs (float): S-wave velocity (m/s) Example", ".. 
note:: The ``conf`` global variables need to be set for this calculation", "\\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh =", "CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3", "= (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 -", "- cf.slow*cf.slow) # Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1", "Example ------- >>> from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0., 0.,", ">>> # Define two-layer model model with identical material >>> conf.nlay = 2", "np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux =", "rho = es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C, rho", "case. Set all of the following variables through the conf module: 'dp', 'c',", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option): Whether or not the Fortran", "= C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3", "(90. - pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. 
#", "cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global wavefield variables to Fortran", "cf.slow, cf.baz) # Append to stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz", "return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic", "weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else:", "(bool, optional): Whether to rotate from Z-R-T coordinate system to P-SV-SH wave mode", "average shear modulus (GPa) * Kreuss (float): Reuss average bulk modulus (GPa) *", "case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an", "transform of seismograms for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: #", "the stiffness matrix to the full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness", "(AB ~= BA). In this case we rotate about x_2 first, x_3 second", "ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if", "rho = es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho", "shear (SH) wave mode \"\"\" # Copy traces trP = trZ.copy() trV =", "\"\"\" Stacks all traces in two ``Stream`` objects. Args: st1 (obspy.stream): Stream 1", "object in cartesian coordinate system pvh (bool, optional): Whether to rotate from Z-R-T", "elastic material Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\", "None Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx", "= Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill", "to an ``obspy`` ``Stream`` object. 
trxyz = get_trxyz(yx, yy, yz) return trxyz def", "= cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G", "x_1 axes. Note that the sequence of the rotation is important: (AB ~=", "symmetry axis (degree) type (str, optional): Type of elastic material Returns: (tuple): Tuple", "''' import sys import itertools import numpy as np import pyfftw from scipy.signal", "3, 3, 3)``) \"\"\" a = a*1.e3 b = b*1.e3 C = es.iso_tensor(a,", "cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G", "def get_trxyz(yx, yy, yz): \"\"\" Function to store displacement seismograms into ``obspy`` ``Trace``", "transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data,", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "[]; tr = []; pl = [] # Read file line by line", "mode Returns: (obspy.stream): tfs: Stream containing Radial and Transverse transfer functions \"\"\" #", "topmost layer is useful for travel time calculation >>> conf.isoflg = ['atg'] >>>", "moduli and density (rho) in kg/m^3 Args: K (float): Bulk modulus (GPa) G", "to full tensor cc = voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl,", "pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr,", "= -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz", "'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37',", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "None: Parameters are now global variables shared between all other modules \"\"\" h", "tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``) * rho (float): Density (kg/m^3)", "es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else: print('type of mineral/rock not implemented')", "def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S wave velocities from given", "IN THE # SOFTWARE. ''' Utility functions to interact with ``telewavesim`` modules. '''", "S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] +", "= np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for", "typ=='qtz': C, rho = es.quartz() elif typ=='zo': C, rho = es.zoisite() # Rocks", "telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a,", "set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor for anisotropic minerals. The \\", "for Stream 2 \"\"\" print() print('Stacking ALL traces in streams') # Copy stats", "C, rho = es.antigorite() elif typ=='bt': C, rho = es.biotite() elif typ=='cpx': C,", "= cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global OBS-related variables to", "np import pyfftw from scipy.signal import hilbert from obspy.core import Trace, Stream from", "in st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1", "(tuple): tuple containing: * Vp (float): P-wave velocity (m/s) * Vs (float): S-wave", "third. 
For trend and plunge of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge", "to deal # in the Software without restriction, including without limitation the rights", "to any person obtaining a copy # of this software and associated documentation", "yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform of", "rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return", "wave mode * trV (obspy.trace): Vertically polarized shear (SV) wave mode * trH", "es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated()", "tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack", "(s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with updated stats", "r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay =", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "= np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr =", "stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time through model. The bottom layer", "C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of", "value (s/km) Returns: (float): t1: Time in seconds Example ------- >>> from telewavesim", "st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 +=", "average Kvrh = (Kvoigt + Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. 
return Kvoigt,", "= trxyz[2] baz = cf.baz # Copy to radial and transverse rtr =", "Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time through model.", "= np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats)", "conf model2for() wave2for() # Run the ``plane`` module depending on land or OBS", "typ=='jade': C, rho = es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz':", "= es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor using trend and plunge", "or not. :raises ExceptionError: Throws ExceptionError if not all variables are set. \"\"\"", "es.iso_tensor(a, b) # Convert Voigt to full tensor cc = voigt2cc(C) return cc", "Stacks all traces in two ``Stream`` objects. Args: st1 (obspy.stream): Stream 1 st2", "velocity (m/s) * Vs (float): S-wave velocity (m/s) Example ------- >>> from telewavesim", "all set before executing the main ``telewavesim.rmat_f.plane_****`` function. 
Args: fortran (book, option): Whether", "in radians gam (float): Angle in radians Returns: (np.ndarray): aa: Rotated tensor with", "cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor representation to the Voigt notation", "cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif", "a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc:", "/density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" # Trend and plunge of", "yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an ``obspy`` ``Stream`` object.", "throw an Exception and stop check_cf(obs) # Pass variables to Fortran conf model2for()", "range(3): asum=0.0 for i in range(3): for j in range(3): for k in", "the Software. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "= st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 =", "= pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz))))", "cf.slow*cf.slow) # Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1 -", "this software and associated documentation files (the \"Software\"), to deal # in the", "-trT.data/2. 
return trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces", "trace for Stream 1 * stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\"", "utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658,", "variables are set and throws an Exception if not. Args: obs (bool, optional):", "if sum(check)/len(check)>0.: raise Exception(\"global variables not all set. Set all of the following", "(float): Slowness value (s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace", "= np.dot(rot, r_z) # Extract P and SV components trP.data = vec[0,:] trV.data", "= pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function", "itertools import numpy as np import pyfftw from scipy.signal import hilbert from obspy.core", "cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins", "granted, free of charge, to any person obtaining a copy # of this", "trH = trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs =", "three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the x_2, x_3, x_1", "1 * stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\" print() print('Stacking ALL", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV':", "'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc", "``conf`` global variables need to be set for this calculation to succeed. This", "displacement traces. 
Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system pvh", "Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a", "db = (b*1.e3)*ani/100. # Set up matrix elements AA = (a*1.e3 - da/2.)**2", "cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to generate tensor for", "cc = np.zeros((3,3,3,3), dtype=float) for i, j, k, l in itertools.product(range(3), range(3), range(3),", "= np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to the `conf`", "module. Returns: None Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp =", "np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data)", "and this permission notice shall be included in all # copies or substantial", "C, rho = es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C,", "bulk modulus K and the shear modulus G. Args: C (np.ndarray): Stiffness matrix", "traces. 
Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system pvh (bool,", "# Permission is hereby granted, free of charge, to any person obtaining a", "# Only topmost layer is useful for travel time calculation >>> conf.isoflg =", "or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads", "pl = [] # Read file line by line and populate lists try:", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "= voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to", "Bulk modulus (GPa) G (float): Shear modulus (GPa) rho (float): Density (kg/m^3) Returns:", "(float): P-wave velocity (m/s) * Vs (float): S-wave velocity (m/s) Example ------- >>>", "tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1))", "def model2for(): \"\"\" Passes global model variables to Fortran ``conf`` module. Returns: None", "= Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate", "Shear modulus (GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple containing: * Vp", "weight1 += np.exp(1j*phase1) for tr in st2: tmp2 += tr.data hilb2 = hilbert(tr.data)", "(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12],", "model file from the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow", "np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr in st1: tmp1 += tr.data", "(degree) type (str, optional): Type of elastic material Returns: (tuple): Tuple containing: *", "= (Gvoigt + Greuss)/2. 
return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho):", "ntr.copy() ttr = etr.copy() # Rotate to radial and transverse rtr.data, ttr.data =", "= es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif typ=='ol': C, rho =", "of the tensor cc (c_ijkl) about three angles (alpha, beta, gamma) Args: a", "Copy traces trP = trZ.copy() trV = trR.copy() trH = trT.copy() # Vertical", "k in range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum", "stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow tr.stats.baz = baz return tr", "(1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] tol = 1e-3", "- 2.*AC*(AA + CC + 2.*LL) + (AA + LL)*(CC + LL)) #", "traces for tr in st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 =", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si':", "1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for i in range(6): for j", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "b) # Convert Voigt to full tensor cc = voigt2cc(C) return cc def", "Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked trace for Stream 1", "typ=='ms': C, rho = es.muscovite() elif typ=='ol': C, rho = es.olivine() elif typ=='opx':", "j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri':", "np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) # Extract P and SV components", "b = b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt to full tensor", "ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data =", "tr, 0.) 
# Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion", "np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12 =", "typ='atg'): \"\"\" Function to generate tensor for anisotropic minerals. The \\ tensor is", "cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge cc = rot_tensor(cc,", "= (a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3 +", "the following conditions: # The above copyright notice and this permission notice shall", "the full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``)", "\"\"\" Function to generate tensor for isotropic material. Args: a (float): P-wave velocity", "tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge cc =", "to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge", "Copyright 2019 <NAME> # This file is part of Telewavesim. # Permission is", "Function to generate tensor for isotropic material. Args: a (float): P-wave velocity (km/s)", "copies of the Software, and to permit persons to whom the Software is", "trH.data = -trT.data/2. return trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks", "the symmetry axis. Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity", "THE # SOFTWARE. ''' Utility functions to interact with ``telewavesim`` modules. 
''' import", "\"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\"", "(float): Reuss average bulk modulus (GPa) * Greuss (float): Reuss average shear modulus", "= ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz =", "and Transverse transfer functions \"\"\" # Extract East, North and Vertical ntr =", "z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" #", "b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\", "np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux) tuy =", "tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 =", "conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to the `conf` module >>> #", "range(3): for r in range(3): for s in range(3): asum=0.0 for i in", "The tensor is rotated using the trend and plunge of the symmetry axis.", "mode. Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component trT (obspy.trace): Transverse", "tr = []; pl = [] # Read file line by line and", "'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB',", "obspy ``Trace`` object. 
Args: tr (obspy.trace): Trace object to update nt (int): Number", "or not all required global variables are set and throws an Exception if", "['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>> conf.thickn[0] = 10.", "indices \"\"\" if i == j: return i return 6-i-j def voigt2cc(C): \"\"\"", "\\ (shape ``(3, 3, 3, 3)``) \"\"\" a = a*1.e3 b = b*1.e3", "= cc elif fl[j] in mins or fl[j] in rocks: cc, rho =", "= r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals =", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data =", "i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j)", "# Minerals if typ=='atg': C, rho = es.antigorite() elif typ=='bt': C, rho =", "es from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import plane as pw_f", "Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] =", "trP.data = vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. return trP, trV, trH", "+ S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0]", "l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr, tft])", "about x_2 first, x_3 second and x_1 third. For trend and plunge of", "Fortran ``conf`` module. 
Returns: None Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\"", "\\ tensor is rotated using the trend and plunge of the symmetry \\", "(float): Shear modulus (GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple containing: *", "second and x_1 third. For trend and plunge of symmetry axis (e.g., tri_tensor):", "generate transfer functions from displacement traces. Args: trxyz (obspy.stream): Obspy ``Stream`` object in", "P-wave velocity (m/s) * Vs (float): S-wave velocity (m/s) Example ------- >>> from", "= np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\", "samples dt (float): Sampling rate slow (float): Slowness value (s/km) baz (float): Back-azimuth", "tr in st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real)", "rho = es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho", "= (b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF", "return cc def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor representation to", "= utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896)", "= np.zeros((conf.nlay)) >>> # Pass variables to the `conf` module >>> # Only", "rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) -", "now global variables shared between all other modules \"\"\" h = []; r", "shall be included in all # copies or substantial portions of the Software.", "return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt notation", "None Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c", "Args: fortran (book, option): Whether or not the Fortran modules are used obs", "slownesses qp = np.sqrt(1/vp/vp - 
cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements", "# Return stream return tfs def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates", "AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3", "the `conf` module >>> # Only topmost layer is useful for travel time", "Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C) cc =", "---> c_mnrs # for m in range(3): for n in range(3): for r", "rho (float): Density (kg/m^3) \"\"\" # Trend and plunge of symmetry axis tr", "+ 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] +", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) #", "cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to run the ``plane`` module and", "file cannot be opened: ',modfile)) with open(modfile) as fileobj: for line in fileobj:", "matrix rot = np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial and Vertical", "update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) #", "# Extract P and SV components trP.data = vec[0,:] trV.data = vec[1,:] trH.data", "function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km)", "in traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) # Update", "cc = voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function", "+ 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0] + C[1,1] + C[2,2]", "set_iso_tensor(a, b): \"\"\" Function to generate tensor for isotropic material. 
Args: a (float):", "rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G =", "rho = es.zoisite() # Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m':", "# Rotate tensor using trend and plunge cc = rot_tensor(cc, pl, tr, 0.)", "dt, slow, baz): \"\"\" Updates the ``stats`` doctionary from an obspy ``Trace`` object.", "np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i]", "2.*LL) # Get tensor with horizontal axis cc = es.tri_tensor(AA, CC, FF, LL,", "tr, 0.) # Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function", "check = [f is None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global", "a Voigt-Reuss-Hill average of the anisotropic stifness matrix to the bulk modulus K", "cf.a[:,:,:,:,j] = cc elif fl[j] in mins or fl[j] in rocks: cc, rho", "stream tfs = Stream(traces=[tfr, tft]) # Return stream return tfs def update_stats(tr, nt,", "# Store in stream tfs = Stream(traces=[tfr, tft]) # Return stream return tfs", "# Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp)", "(float, optional): P-wave velocity used for rotation vs (float, optional): S-wave velocity used", "any person obtaining a copy # of this software and associated documentation files", "= (Kvoigt + Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. 
return Kvoigt, Gvoigt, Kreuss,", "np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data])", "tfs def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the ``stats`` doctionary from", "Pass variables to Fortran conf model2for() wave2for() # Run the ``plane`` module depending", "with horizontal axis # Minerals if typ=='atg': C, rho = es.antigorite() elif typ=='bt':", "all traces for tr in st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1", "elif typ=='ep': C, rho = es.epidote() elif typ=='grt': C, rho = es.garnet() elif", "cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global OBS-related", "to run the ``plane`` module and return 3-component seismograms as an ``obspy`` ``Stream``", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz',", "a copy # of this software and associated documentation files (the \"Software\"), to", "for travel time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>>", "\"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks", "to rotate from Z-R-T coordinate system to P-SV-SH wave mode Returns: (obspy.stream): tfs:", "0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149,", "to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data),", "= vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. 
return trP, trV, trH def", "given bulk (K) and shear (G) moduli and density (rho) in kg/m^3 Args:", "(np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1,", "/density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" a = a*1.e3 b =", "seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "= (a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3 -", "rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model model with identical", "(float): Trend angle of symmetry axis (degree) pl (float): Plunge angle of symmetry", "= np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\", "type (str, optional): Type of elastic material Returns: (tuple): Tuple containing: * cc", "charge, to any person obtaining a copy # of this software and associated", "If OBS, then further pass OBS-related paramters to Fortran conf obs2for() # Get", "yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform of seismograms for", "from scipy.signal import hilbert from obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt", "\"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\"", "tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream trxyz =", "modulus K and the shear modulus G. 
Args: C (np.ndarray): Stiffness matrix (shape", "Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt average bulk modulus (GPa) *", "``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta)", "(e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa", "OBS-related variables to Fortran ``conf`` module. Returns: None Variables to pass are ``dp``,", "included in all # copies or substantial portions of the Software. # THE", "'P' >>> slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 =", "# Append to stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz,", "= np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\", "\"\"\" Passes global wavefield variables to Fortran ``conf`` module. Returns: None Variables to", "traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\"", "baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``)", "= cf.baz # Copy to radial and transverse rtr = ntr.copy() ttr =", "= VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if", "wave mode \"\"\" # Copy traces trP = trZ.copy() trV = trR.copy() trH", "np.ones(len(st1[0].data)) # Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return", "model with identical material >>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>>", "= es.dolomite() elif typ=='ep': C, rho = es.epidote() elif typ=='grt': C, 
rho =", "and the shear modulus G. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``)", "check_cf(obs=False): \"\"\" Checks whether or not all required global variables are set and", "``conf`` module. Returns: None Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt", "# This file is part of Telewavesim. # Permission is hereby granted, free", "or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] =", "module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst =", "before executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option): Whether or not", "(0, 2), (0, 1)] tol = 1e-3 cc = np.asarray(cc) C = np.zeros((6,6))", "(slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model parameters from file that are", "``(3, 3, 3, 3)``) * rho (float): Density (kg/m^3) \"\"\" # Trend and", "weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 =", "mode \"\"\" # Copy traces trP = trZ.copy() trV = trR.copy() trH =", "(C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt =", "typ=='ol': C, rho = es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag':", "the sequence of the rotation is important: (AB ~= BA). In this case", "tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2)", "tensor is rotated using the trend and plunge of the symmetry axis. 
Args:", "Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i, Voigt_j] return cc", "matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1), (2, 2),", "# Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz", "\"\"\" Reads model parameters from file that are passed through the configuration module", "x_3 second and x_1 third. For trend and plunge of symmetry axis (e.g.,", "from displacement traces. Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system", "3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. Gvrh =", "in range(3): for j in range(3): for k in range(3): for l in", "rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl --->", "def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor representation to the Voigt", "es.epidote() elif typ=='grt': C, rho = es.garnet() elif typ=='gln': C, rho = es.glaucophane()", "= update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz)", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex)", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "# copies or substantial portions of the Software. 
# THE SOFTWARE IS PROVIDED", "'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m',", "fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5]))", "Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Check if all", "modulus (GPa) * Gvoigt (float): Voigt average shear modulus (GPa) * Kreuss (float):", "portions of the Software. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "is part of Telewavesim. # Permission is hereby granted, free of charge, to", "np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex)", "cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy();", "if all variables are set. If not, throw an Exception and stop check_cf(obs)", "and setting the variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns: (float):", "matrix to the full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix (shape", "from telewavesim import utils >>> import numpy as np >>> cc, rho =", "tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. 
# Get tensor with horizontal", "Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) # Update trace header tux =", "Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear", "optional): S-wave velocity used for rotation Returns: (tuple): tuple containing: * trP (obspy.trace):", "above copyright notice and this permission notice shall be included in all #", "(GPa) G (float): Shear modulus (GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple", "= ntr.copy() ttr = etr.copy() # Rotate to radial and transverse rtr.data, ttr.data", "traces in two ``Stream`` objects. Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream", "r = []; a = []; b = []; fl = []; ani", "Sampling rate slow (float): Slowness value (s/km) baz (float): Back-azimuth value (degree) Returns:", "to succeed. This is typically ensured through reading of the model file from", "\"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta)", "not the Fortran modules are used obs (bool, optional): Whether or not the", "by line and populate lists try: open(modfile) except: raise(Exception('model file cannot be opened:", "in range(3): for n in range(3): for r in range(3): for s in", "return tfs def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the ``stats`` doctionary", "to generate tensor for anisotropic minerals. The \\ tensor is rotated using the", "* Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>> from telewavesim", "cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]]) # Vector of", "conf as cf_f from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b): \"\"\"", "+ 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. 
Gvrh", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr, tft]) # Return stream return", "es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite()", "rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft =", "``Stream`` object. Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram", "using trend and plunge cc = rot_tensor(cc, pl, tr, 0.) # Return tensor", "<filename>telewavesim/utils.py # Copyright 2019 <NAME> # This file is part of Telewavesim. #", "rho = es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C, rho", "Get tensor with horizontal axis cc = es.tri_tensor(AA, CC, FF, LL, NN) #", "P-wave velocity used for rotation vs (float, optional): S-wave velocity used for rotation", "Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa)", "Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) tr (float):", "an ``obspy`` ``Stream`` object. trxyz = get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx,", "(float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "in range(3): for s in range(3): asum=0.0 for i in range(3): for j", "not all variables are set. 
\"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt,", ">>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>>", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data", "= pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz))))", "+ 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a", "3x3x3x3 tensor representation to the Voigt notation of the stiffness matrix. Args: cc", "'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof] check = [f is None", "cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global wavefield variables", "matrix m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 -", "es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive()", "of the following variables through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt',", "shared between all other modules \"\"\" h = []; r = []; a", "Store in stream tfs = Stream(traces=[tfr, tft]) # Return stream return tfs def", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", ">>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S", "cc (c_ijkl) about three angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor", 
"``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns: (float): t1: Time in seconds", "print('Stacking ALL traces in streams') # Copy stats from stream str_stats = st1[0].stats", "of the Software, and to permit persons to whom the Software is #", "Set up matrix elements AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3 +", "(C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3]", "are set. \"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz]", "= Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) # Update trace header tux", "return t1 def read_model(modfile): \"\"\" Reads model parameters from file that are passed", "= full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i,", "yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an ``obspy`` ``Stream``", "functions from displacement traces. Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate", "of symmetry axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Get", "nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg", "about the x_2, x_3, x_1 axes. Note that the sequence of the rotation", "= np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 =", "full tensor cc = voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl, ani):", "of symmetry axis (degree) type (str, optional): Type of elastic material Returns: (tuple):", "velocity used for rotation vs (float, optional): S-wave velocity used for rotation Returns:", "(float): t1: Time in seconds Example ------- >>> from telewavesim import conf >>>", "if sum(check)/len(check)>0.: raise Exception(\"global variables not all set for OBS case. 
Set all", "of elastic material Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic tensor (GPa", "x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram", "rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt notation for indices", "weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr in st1: tmp1", "= np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr", "trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in two ``Stream``", "Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0),", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr, tft]) # Return", "line and populate lists try: open(modfile) except: raise(Exception('model file cannot be opened: ',modfile))", "utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. for i in range(cf.nlay-1): if cf.isoflg[i] ==", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "``telewavesim`` modules. ''' import sys import itertools import numpy as np import pyfftw", "asum=0.0 for i in range(3): for j in range(3): for k in range(3):", "rotation vs (float, optional): S-wave velocity used for rotation Returns: (tuple): tuple containing:", "bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example -------", "# eta = FF/(AA - 2.*LL) # Get tensor with horizontal axis cc", "tuy = Trace(data=uy) tuz = Trace(data=uz) # Update trace header tux = update_stats(tux,", "= 10. >>> conf.wvtype = 'P' >>> slow = 0.06 # s/km >>>", "return 3-component seismograms as an ``obspy`` ``Stream`` object. .. 
note:: The ``conf`` global", "Transverse transfer functions \"\"\" # Extract East, North and Vertical ntr = trxyz[0]", "(2, 2), (1, 2), (0, 2), (0, 1)] tol = 1e-3 cc =", "= cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in", "= rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh:", "radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float')", "of tensor to Voigt notation for indices \"\"\" if i == j: return", "cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic", "= pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an ``obspy`` ``Stream`` object. trxyz", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "cartesian coordinate system pvh (bool, optional): Whether to rotate from Z-R-T coordinate system", "/density)\\ (shape ``(3, 3, 3, 3)``) * rho (float): Density (kg/m^3) \"\"\" #", "return trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in", "3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation =", "b0 = mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if cf.wvtype=='P': t1 +=", "np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws:", "OBS case. if obs: # If OBS, then further pass OBS-related paramters to", "from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b): \"\"\" Function to generate", "elif typ=='LHZ': C, rho = es.lherzolite() else: print('type of mineral/rock not implemented') return", "In this case we rotate about x_2 first, x_3 second and x_1 third.", "= np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp,", "= (90. - pl)*np.pi/180. 
# Get tensor with horizontal axis # Minerals if", "= 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss", "qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix m11 = cf.slow*vs*vs/vp", "set. \"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check", "a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h)", "rho = es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C, rho", "plunge of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\"", "(degree) pl (float): Plunge angle of symmetry axis (degree) type (str, optional): Type", "full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc):", "4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] +", "i == j: return i return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt", "\"\"\" Passes global model variables to Fortran ``conf`` module. Returns: None Variables to", "f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set. Set all", "cc = es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor using trend and", "the Software is # furnished to do so, subject to the following conditions:", "Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to", "0.) # Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of", "LL, NN) # Rotate tensor using trend and plunge cc = rot_tensor(cc, pl,", "matrix to the bulk modulus K and the shear modulus G. Args: C", "``Stream`` object. .. 
note:: The ``conf`` global variables need to be set for", "[-m21, m22]]) # Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation", "Plunge angle of symmetry axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc:", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "= FF/(AA - 2.*LL) # Get tensor with horizontal axis cc = es.tri_tensor(AA,", "elif typ=='dol': C, rho = es.dolomite() elif typ=='ep': C, rho = es.epidote() elif", "= np.ones(len(st1[0].data)) # Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats)", "telewavesim import elast as es from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f", "(float): Sampling rate slow (float): Slowness value (s/km) baz (float): Back-azimuth value (degree)", "sys import itertools import numpy as np import pyfftw from scipy.signal import hilbert", "cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui", "generate tensor for transverse isotropy. The tensor is rotated using the trend and", "an obspy ``Trace`` object. Args: tr (obspy.trace): Trace object to update nt (int):", "``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform", "``obspy`` ``Stream`` object. 
Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement", "np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces", "C, rho = es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif typ=='ms': C,", "trend and plunge of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` =", "as pw_f def set_iso_tensor(a, b): \"\"\" Function to generate tensor for isotropic material.", "(kg/m^3) Returns: (tuple): tuple containing: * Vp (float): P-wave velocity (m/s) * Vs", "(degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\", "(AA + LL)*(CC + LL)) # eta = FF/(AA - 2.*LL) # Get", "\\ (shape ``(3, 3, 3, 3)``) \"\"\" # Trend and plunge of symmetry", "to succeed. This function first checks to make sure the variables are all", "variables to Fortran conf model2for() wave2for() # Run the ``plane`` module depending on", "np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for m in", "elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif", "\"\"\" Convert from the full 3x3x3x3 tensor representation to the Voigt notation of", "= np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial and Vertical r_z =", "Type of elastic material Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic tensor", "fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h) cf.thickn =", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "= trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv =", "= es.iso_tensor(a, b) # Convert Voigt to full tensor cc = voigt2cc(C) return", "+ 2.*LL) + (AA + LL)*(CC + LL)) # eta = FF/(AA -", "in 
range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum +", "b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt to full tensor cc =", "[cf.dp, cf.c, cf.rhof] check = [f is None for f in lst] if", "Fortran conf model2for() wave2for() # Run the ``plane`` module depending on land or", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "cf_f from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b): \"\"\" Function to", "np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs)", "= np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui =", "optional): Whether to rotate from Z-R-T coordinate system to P-SV-SH wave mode Returns:", "t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 +=", "C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape", "The ``conf`` global variables need to be set for this calculation to succeed.", "of samples dt (float): Sampling rate slow (float): Slowness value (s/km) baz (float):", "Compressional (P) wave mode * trV (obspy.trace): Vertically polarized shear (SV) wave mode", "bottom layer is irrelevant in this calculation. .. note:: The ``conf`` global variables", "# Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1,", "is hereby granted, free of charge, to any person obtaining a copy #", "seismograms to an ``obspy`` ``Stream`` object. trxyz = get_trxyz(yx, yy, yz) return trxyz", "``Stream`` objects. 
Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws (bool,", "\"\"\" a = a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b) # Convert", "= es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho =", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "def run_plane(obs=False): \"\"\" Function to run the ``plane`` module and return 3-component seismograms", "Vertical ntr = trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz = cf.baz", "pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2))", "succeed. This is typically ensured through reading of the model file from the", "calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho", "= es.muscovite() elif typ=='ol': C, rho = es.olivine() elif typ=='opx': C, rho =", "\"\"\" Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects and then an", "\\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] =", "subject to the following conditions: # The above copyright notice and this permission", "``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\"", "rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1]", "model. The bottom layer is irrelevant in this calculation. .. 
note:: The ``conf``", "np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws',", "= set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc", "utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho)", "# If OBS, then further pass OBS-related paramters to Fortran conf obs2for() #", "use either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False):", "(a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) +", "Elastic tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape", "with shape ``(3, 3, 3, 3)`` .. note:: The three angles (``alpha``, ``beta``,", "all required global variables are set and throws an Exception if not. Args:", "rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) +", "for an OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\"", "(b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA", "anisotropic minerals. 
The \\ tensor is rotated using the trend and plunge of", "import utils >>> import numpy as np >>> cc, rho = utils.set_aniso_tensor(0., 0.,", "with open(modfile) as fileobj: for line in fileobj: if not line.rstrip().startswith('#'): model =", "model2for() wave2for() # Run the ``plane`` module depending on land or OBS case.", "k, l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from the", "C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C,", "and S wave velocities from given bulk (K) and shear (G) moduli and", "not the analysis is done for an OBS stations Returns: (obspy.stream): trxyz: Stream", "``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream`` object. Args: ux (np.ndarray): x-component", "to the full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6,", "3-component displacement seismograms \"\"\" # Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx))", "pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx))", "import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as cf", "nt, dt, slow, baz): \"\"\" Updates the ``stats`` doctionary from an obspy ``Trace``", "stream return tfs def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the ``stats``", "Greuss (float): Reuss average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk", "through the configuration module ``conf``. 
Returns: None: Parameters are now global variables shared", "= np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\", "all of the following variables through the conf module: 'a', 'rho', 'thickn', 'isoflg',", "copy # of this software and associated documentation files (the \"Software\"), to deal", "Voigt notation for indices \"\"\" if i == j: return i return 6-i-j", "Vertical component trR (obspy.trace): Radial component trT (obspy.trace): Transverse component vp (float, optional):", ">>> from telewavesim import utils >>> import numpy as np >>> cc, rho", "trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data =", "to Voigt notation for indices \"\"\" if i == j: return i return", "range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i]", "range(3): for s in range(3): asum=0.0 for i in range(3): for j in", "+ db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF = -LL", "axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3))", "fileobj: for line in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1]))", "in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc", "orientation to `P-SV-SH` wave mode. 
Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial", "= -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] =", "full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt notation for indices \"\"\" if", "trxyz = get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function", "len(h) cf.thickn = h cf.rho = r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay))", "containing: * stack1 (obspy.trace): Stacked trace for Stream 1 * stack2 (obspy.trace): Stacked", "Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor", "weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data))", "for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set. Set", "displacement seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns:", "m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs", "anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up matrix elements AA", "es.lherzolite() else: print('type of mineral/rock not implemented') return # Convert Voigt to full", "weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for", "weight2 = np.ones(len(st1[0].data)) # Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 =", "dtype='float') # print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP,", "rho (float): Density (kg/m^3) Returns: (tuple): tuple containing: * Vp (float): P-wave velocity", "beta (float): Angle in radians gam (float): Angle in radians Returns: (np.ndarray): aa:", "``conf`` module. 
Returns: None Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\"", "obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as cf from telewavesim import elast", "trP (obspy.trace): Compressional (P) wave mode * trV (obspy.trace): Vertically polarized shear (SV)", "the variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns: (float): t1: Time", "Passes global model variables to Fortran ``conf`` module. Returns: None Variables to pass", "rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1]", "sublicense, and/or sell # copies of the Software, and to permit persons to", "Returns: None Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt", "cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 -", "pl = (90. - pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. 
db =", "a = a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt", "rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data)", "C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \\", "obs: # If OBS, then further pass OBS-related paramters to Fortran conf obs2for()", "3, 3)``) \"\"\" # Trend and plunge of symmetry axis tr = -tr*np.pi/180.", "typ=='lz': C, rho = es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif typ=='ol':", "to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c", "3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation", "optional): Whether or not the analysis is done for an OBS stations Returns:", "rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa", "3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2.", "C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] +", "Rotate to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a =", "~= BA). In this case we rotate about x_2 first, x_3 second and", "Stream containing Radial and Transverse transfer functions \"\"\" # Extract East, North and", "= es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif typ=='ep': C, rho =", "``gam``) correspond to rotation about the x_2, x_3, x_1 axes. 
Note that the", "pyfftw from scipy.signal import hilbert from obspy.core import Trace, Stream from obspy.signal.rotate import", "trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt,", "Trace(data=uz) # Update trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy", "in seconds Example ------- >>> from telewavesim import conf >>> from telewavesim import", "else: # Get the Fourier transform of seismograms for ``land`` case yx, yy,", "= es.epidote() elif typ=='grt': C, rho = es.garnet() elif typ=='gln': C, rho =", "# Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor", "'rhof'\") def model2for(): \"\"\" Passes global model variables to Fortran ``conf`` module. Returns:", "# copies of the Software, and to permit persons to whom the Software", "are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz =", "fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j]", "hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2: tmp2", "rotation about the x_2, x_3, x_1 axes. Note that the sequence of the", ">>> from telewavesim import conf >>> from telewavesim import utils >>> import numpy", "not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7]))", "* stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\" print() print('Stacking ALL traces", "model parameters from file that are passed through the configuration module ``conf``. Returns:", "s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. 
for i in range(cf.nlay-1): if", "* Gvoigt (float): Voigt average shear modulus (GPa) * Kreuss (float): Reuss average", "if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global wavefield variables to", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh))))", "'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] =", "Throws ExceptionError if not all variables are set. \"\"\" lst = [cf.a, cf.rho,", "3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0,", "to Fortran ``conf`` module. Returns: None Variables to pass are ``dp``, ``c``, ``rhof``", "(obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Get displacements in time", "wave mode Returns: (obspy.stream): tfs: Stream containing Radial and Transverse transfer functions \"\"\"", "# Copyright 2019 <NAME> # This file is part of Telewavesim. # Permission", "to the Voigt notation of the stiffness matrix. Args: cc (np.ndarray): Elastic tensor", "-np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta)", "(GPa) Example ------- >>> from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0.,", "Horizontally polarized shear (SH) wave mode \"\"\" # Copy traces trP = trZ.copy()", "shape ``(3, 3, 3, 3)`` .. 
note:: The three angles (``alpha``, ``beta``, ``gam``)", "m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs)", "open(modfile) except: raise(Exception('model file cannot be opened: ',modfile)) with open(modfile) as fileobj: for", "print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH", "+ Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh,", "for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier", "elif typ=='grt': C, rho = es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "2), (0, 1)] tol = 1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data =", "= es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho =", "the following variables through the conf module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\"", "= np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass", "es.dolomite() elif typ=='ep': C, rho = es.epidote() elif typ=='grt': C, rho = es.garnet()", "np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1]", "61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C) # Voigt", "Utility functions to interact with ``telewavesim`` modules. 
''' import sys import itertools import", "Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average", "tensor with shape ``(3, 3, 3, 3)`` .. note:: The three angles (``alpha``,", "variables to Fortran ``conf`` module. Returns: None Variables to pass are ``dt``, ``slow``,", "+ rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5):", "Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz =", "cf.dt, cf.slow, cf.baz) # Append to stream trxyz = Stream(traces=[tux, tuy, tuz]) return", "None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set", "tensor (shape ``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3),", "of the model file from the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype``", "fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data =", "range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l]", "``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\"", "seismograms as an ``obspy`` ``Stream`` object. .. 
note:: The ``conf`` global variables need", "are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof =", "mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz',", "component trT (obspy.trace): Transverse component vp (float, optional): P-wave velocity used for rotation", "lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set for OBS case. Set", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "this calculation to succeed. This function first checks to make sure the variables", "rho = es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else: print('type of mineral/rock", "modules are used obs (bool, optional): Whether or not the analysis is done", "cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in", "trxyz[2] baz = cf.baz # Copy to radial and transverse rtr = ntr.copy()", "3)`` alpha (float): Angle in radians beta (float): Angle in radians gam (float):", "LL)*(CC + LL)) # eta = FF/(AA - 2.*LL) # Get tensor with", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "= []; a = []; b = []; fl = []; ani =", "all variables are set. \"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt,", "es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite()", "to Fortran conf model2for() wave2for() # Run the ``plane`` module depending on land", "to be set for this calculation to succeed. 
This function first checks to", "+= tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) #", "and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) # Extract", "def read_model(modfile): \"\"\" Reads model parameters from file that are passed through the", "C = np.zeros((6,6)) for i in range(6): for j in range(6): k, l", "variables to the `conf` module >>> # Only topmost layer is useful for", "C, rho = es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C,", "cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to", "shear (G) moduli and density (rho) in kg/m^3 Args: K (float): Bulk modulus", "pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3", "read_model(modfile): \"\"\" Reads model parameters from file that are passed through the configuration", "typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ':", "0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K,", "rho = es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif typ=='ep': C, rho", "= np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay):", "that the sequence of the rotation is important: (AB ~= BA). In this", "seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream):", "Reuss average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa)", "= cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1", "plunge of the symmetry \\ axis. 
Args: tr (float): Trend angle of symmetry", "modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float):", "throws an Exception if not. Args: obs (bool, optional): Whether the analysis is", "function first checks to make sure the variables are all set before executing", "Function to generate tensor for anisotropic minerals. The \\ tensor is rotated using", "(rho) in kg/m^3 Args: K (float): Bulk modulus (GPa) G (float): Shear modulus", "pl = (90. - pl)*np.pi/180. # Get tensor with horizontal axis # Minerals", "whom the Software is # furnished to do so, subject to the following", "C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor", "= []; ani = []; tr = []; pl = [] # Read", "in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l)", "the Fourier transform of seismograms for ``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype,", "all of the following variables through the conf module: 'dp', 'c', 'rhof'\") def", "plane as pw_f def set_iso_tensor(a, b): \"\"\" Function to generate tensor for isotropic", "seconds Example ------- >>> from telewavesim import conf >>> from telewavesim import utils", "obs (bool, optional): Whether or not the analysis is done for an OBS", "cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 =", "\"\"\" Conversion of tensor to Voigt notation for indices \"\"\" if i ==", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "K (float): Bulk modulus (GPa) G (float): Shear modulus (GPa) rho (float): Density", "m in range(3): for n in range(3): for r in range(3): for s", "= cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to run the ``plane``", "Fourier transform of seismograms for ``obs``case yx, yy, yz = 
pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else:", ">>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K,", "Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr))))", "lst = [cf.dp, cf.c, cf.rhof] check = [f is None for f in", "rho = es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C, rho", "m12], [-m21, m22]]) # Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data]) #", "radians beta (float): Angle in radians gam (float): Angle in radians Returns: (np.ndarray):", "of symmetry axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor", "typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G,", "is rotated using the trend and plunge of the symmetry \\ axis. Args:", "rho = es.antigorite() elif typ=='bt': C, rho = es.biotite() elif typ=='cpx': C, rho", "cc = rot_tensor(cc, pl, tr, 0.) 
# Return tensor return cc, rho def", "for s in range(3): asum=0.0 for i in range(3): for j in range(3):", "= np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux) tuy", "rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy();", "ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data", "update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the ``stats`` doctionary from an obspy", "\"\"\" Function to run the ``plane`` module and return 3-component seismograms as an", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH':", "from file that are passed through the configuration module ``conf``. Returns: None: Parameters", "are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho", "Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system pvh (bool, optional):", "generate tensor for isotropic material. Args: a (float): P-wave velocity (km/s) b (float):", "'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag',", "es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80()", "lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f", "+ 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. 
Gvrh = (Gvoigt", "# Get the Fourier transform of seismograms for ``obs``case yx, yy, yz =", "rotation of the tensor cc (c_ijkl) about three angles (alpha, beta, gamma) Args:", "= 'P' >>> slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1", "\\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # #", "in radians beta (float): Angle in radians gam (float): Angle in radians Returns:", "b, tr, pl, ani): \"\"\" Function to generate tensor for transverse isotropy. The", "horizontal axis # Minerals if typ=='atg': C, rho = es.antigorite() elif typ=='bt': C,", "(float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape", "typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif typ=='ep':", "fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins or fl[j]", "= cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to", "= pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp))))", "FF, LL, NN) # Rotate tensor using trend and plunge cc = rot_tensor(cc,", "for j in range(6): k, l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j]", "elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "= plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0]", "``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for():", "elif cf.wvtype=='SH': tft.data = 
np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft", "NN) # Rotate tensor using trend and plunge cc = rot_tensor(cc, pl, tr,", "Vp (float): P-wave velocity (m/s) * Vs (float): S-wave velocity (m/s) Example -------", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ''' Utility functions to", "from stream str_stats = st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 =", "velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3,", "+= np.exp(1j*phase1) for tr in st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2", "= es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C, rho =", "Density (kg/m^3) \"\"\" # Trend and plunge of symmetry axis tr = -tr*np.pi/180.", "2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] -", "= np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j, k, l in itertools.product(range(3),", "Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2", "The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the x_2, x_3,", "we rotate about x_2 first, x_3 second and x_1 third. For trend and", "= ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>> conf.thickn[0] =", "Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor", "an OBS case or not. :raises ExceptionError: Throws ExceptionError if not all variables", "symmetry axis. 
Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s)", "rtr = ntr.copy() ttr = etr.copy() # Rotate to radial and transverse rtr.data,", "# Update trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy =", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data", "rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag", "on land or OBS case. if obs: # If OBS, then further pass", "Returns: (obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow =", "(obspy.stream): Obspy ``Stream`` object in cartesian coordinate system pvh (bool, optional): Whether to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "average bulk modulus (GPa) * Gvoigt (float): Voigt average shear modulus (GPa) *", "- C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss", "10. 
>>> conf.wvtype = 'P' >>> slow = 0.06 # s/km >>> utils.calc_ttime(slow)", "trH (obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\" # Copy traces trP", ">>> slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0.", "ani = []; tr = []; pl = [] # Read file line", "= update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz)", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data", "and shear (G) moduli and density (rho) in kg/m^3 Args: K (float): Bulk", "Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 =", "# Run the ``plane`` module depending on land or OBS case. if obs:", "cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global OBS-related variables to Fortran ``conf``", "containing: * Kvoigt (float): Voigt average bulk modulus (GPa) * Gvoigt (float): Voigt", "trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to store displacement seismograms into ``obspy``", "'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m',", "Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables", "Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray):", "= tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2", "def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic stifness matrix to", "two ``Stream`` objects. 
Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws", "FF/(AA - 2.*LL) # Get tensor with horizontal axis cc = es.tri_tensor(AA, CC,", "mineral/rock not implemented') return # Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho", "trend and plunge cc = rot_tensor(cc, pl, tr, 0.) # Return tensor return", "as np import pyfftw from scipy.signal import hilbert from obspy.core import Trace, Stream", "velocity used for rotation Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional (P)", "+ Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates", "and SV components trP.data = vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. return", "displacement seismograms to an ``obspy`` ``Stream`` object. trxyz = get_trxyz(yx, yy, yz) return", "variables through the conf module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes global", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>> from telewavesim import utils >>>", "\"\"\" # Extract East, North and Vertical ntr = trxyz[0] etr = trxyz[1]", "= cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0,", "uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz:", "elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr))))", "* Vs (float): S-wave velocity (m/s) Example ------- >>> from telewavesim import utils", "average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) *", "seismograms for ``land`` case yx, yy, yz = 
pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement", "the trend and plunge of the symmetry \\ axis. Args: tr (float): Trend", "three angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor with shape ``(3,", "in range(3): for r in range(3): for s in range(3): asum=0.0 for i", "in radians Returns: (np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3, 3)``", "doctionary from an obspy ``Trace`` object. Args: tr (obspy.trace): Trace object to update", "(bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked", "Read file line by line and populate lists try: open(modfile) except: raise(Exception('model file", "average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example", "not all required global variables are set and throws an Exception if not.", "line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) #", "= utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6]", "not defined: use either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return", "= hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 =", "rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) -", "import utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc)", "pl, typ='atg'): \"\"\" Function to generate tensor for anisotropic minerals. 
The \\ tensor", "trP = trZ.copy() trV = trR.copy() trH = trT.copy() # Vertical slownesses qp", "* Greuss (float): Reuss average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average", "variables are set. \"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow,", "cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1", "ani): \"\"\" Function to generate tensor for transverse isotropy. The tensor is rotated", "C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt", "Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>> from telewavesim import", "'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f',", "t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model parameters", "Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor cc (c_ijkl) about", "pl (float): Plunge angle of symmetry axis (degree) type (str, optional): Type of", "elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif", "to `P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr,", "= ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay):", "dt (float): Sampling rate slow (float): Slowness value (s/km) baz (float): Back-azimuth value", "trR (obspy.trace): Radial component trT (obspy.trace): Transverse component vp (float, optional): P-wave velocity", "and x_1 third. 
For trend and plunge of symmetry axis (e.g., tri_tensor): ``alpha``", "\"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ (obspy.trace):", "\"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic stifness matrix to the bulk", "C, rho = es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C,", "(GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" a = a*1.e3 b", "tensor for transverse isotropy. The tensor is rotated using the trend and plunge", "angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the x_2, x_3, x_1 axes.", "rotated using the trend and plunge of the symmetry \\ axis. Args: tr", "Stream 1 * stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\" print() print('Stacking", "= 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. for i", "tfs: Stream containing Radial and Transverse transfer functions \"\"\" # Extract East, North", "print('\\nFlag not defined: use either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception())", "= rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr =", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data =", "(obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\" # Copy traces trP =", "ensured through reading of the model file from the function ``utils.read_model(modfile)``, and setting", "(float): Bulk modulus (GPa) G (float): Shear modulus (GPa) rho (float): Density (kg/m^3)", "Run the ``plane`` module depending on land or OBS case. 
if obs: #", "= cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0", "3, 3)`` alpha (float): Angle in radians beta (float): Angle in radians gam", "notice shall be included in all # copies or substantial portions of the", ">>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to", "typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz':", "yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform of seismograms", "Tuple containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3,", "= trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp =", "tuple containing: * Vp (float): P-wave velocity (m/s) * Vs (float): S-wave velocity", "-np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs #", "object. .. note:: The ``conf`` global variables need to be set for this", "= es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C, rho =", "raise Exception(\"global variables not all set. Set all of the following variables through", "LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2", "voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to generate", "m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot =", "of mineral/rock not implemented') return # Convert Voigt to full tensor cc =", "\"\"\" Convert the Voigt representation of the stiffness matrix to the full 3x3x3x3", "= update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream trxyz = Stream(traces=[tux,", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
''' Utility", "return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor for anisotropic", "es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz()", "transfer functions \"\"\" # Extract East, North and Vertical ntr = trxyz[0] etr", "Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) #", "vs (float, optional): S-wave velocity used for rotation Returns: (tuple): tuple containing: *", "cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 =", "symmetry axis (degree) pl (float): Plunge angle of symmetry axis (degree) type (str,", "and plunge cc = rot_tensor(cc, pl, tr, 0.) # Return tensor return cc,", "copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED \"AS", "pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data", "Args: slow (float): Slowness value (s/km) Returns: (float): t1: Time in seconds Example", "Copy to radial and transverse rtr = ntr.copy() ttr = etr.copy() # Rotate", "from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import plane as pw_f def", "to Fortran conf obs2for() # Get the Fourier transform of seismograms for ``obs``case", "= trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs", "component trR (obspy.trace): Radial component trT (obspy.trace): Transverse component vp (float, optional): P-wave", "modulus (GPa) Example ------- >>> from telewavesim import utils >>> cc, rho =", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = 
np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft))))", "cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for():", "qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of", "and transverse rtr = ntr.copy() ttr = etr.copy() # Rotate to radial and", "tensor representation to the Voigt notation of the stiffness matrix. Args: cc (np.ndarray):", "tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h) cf.thickn = h cf.rho", "Radial and Transverse transfer functions \"\"\" # Extract East, North and Vertical ntr", "3)``) \"\"\" # Trend and plunge of symmetry axis tr = -tr*np.pi/180. pl", "``beta``, ``gam``) correspond to rotation about the x_2, x_3, x_1 axes. Note that", "case or not. :raises ExceptionError: Throws ExceptionError if not all variables are set.", "pass OBS-related paramters to Fortran conf obs2for() # Get the Fourier transform of", "tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1)", "def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in two ``Stream`` objects. Args:", "rho = es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif typ=='lws': C, rho", "for m in range(3): for n in range(3): for r in range(3): for", "average bulk modulus (GPa) * Greuss (float): Reuss average shear modulus (GPa) *", "r_z) # Extract P and SV components trP.data = vec[0,:] trV.data = vec[1,:]", "S-wave velocity (km/s) tr (float): Trend angle of symmetry axis (degree) pl (float):", "not all set. 
Set all of the following variables through the conf module:", "if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 =", ">>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model model", "``Stream`` object in cartesian coordinate system pvh (bool, optional): Whether to rotate from", "= cf.baz def obs2for(): \"\"\" Passes global OBS-related variables to Fortran ``conf`` module.", "the bulk modulus K and the shear modulus G. Args: C (np.ndarray): Stiffness", "(s/km) Returns: (float): t1: Time in seconds Example ------- >>> from telewavesim import", "Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component trT (obspy.trace): Transverse component", "= vec[1,:] trH.data = -trT.data/2. return trP, trV, trH def stack_all(st1, st2, pws=False):", "np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i", "= np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2] +", "cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes", "Performs a rotation of the tensor cc (c_ijkl) about three angles (alpha, beta,", "deal # in the Software without restriction, including without limitation the rights #", "rot_tensor(cc, pl, tr, 0.) # Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j):", "Returns: (float): t1: Time in seconds Example ------- >>> from telewavesim import conf", "vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode. Args:", "Rotation vec = np.dot(rot, r_z) # Extract P and SV components trP.data =", "if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc =", "Check if all variables are set. 
If not, throw an Exception and stop", "np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for m in range(3): for n", "cf.rhof] check = [f is None for f in lst] if sum(check)/len(check)>0.: raise", "cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] =", "in mins or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc", ">>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to the `conf` module >>>", "[f is None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not", "= cf.rhof def run_plane(obs=False): \"\"\" Function to run the ``plane`` module and return", "ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h) cf.thickn = h", "2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0] + C[1,1] + C[2,2] -", "modulus (GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple containing: * Vp (float):", "(obspy.trace): Trace object to update nt (int): Number of samples dt (float): Sampling", "\"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def", "update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream trxyz = Stream(traces=[tux, tuy,", "slow, baz): \"\"\" Updates the ``stats`` doctionary from an obspy ``Trace`` object. Args:", "elif typ=='qtz': C, rho = es.quartz() elif typ=='zo': C, rho = es.zoisite() #", "tensor is rotated using the trend and plunge of the symmetry \\ axis.", "containing Radial and Transverse transfer functions \"\"\" # Extract East, North and Vertical", "full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge cc", "`Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical component trR (obspy.trace):", "cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global OBS-related variables to Fortran", "depending on land or OBS case. 
if obs: # If OBS, then further", "update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz", "pl, tr, 0.) # Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\"", "Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: *", "(float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) tr (float): Trend angle", "np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2: tmp2 += tr.data hilb2", "in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4])", "'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f',", "the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs:", "(obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Check if all variables", "tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh", "variables shared between all other modules \"\"\" h = []; r = [];", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "the analysis is done for an OBS case or not. :raises ExceptionError: Throws", "to whom the Software is # furnished to do so, subject to the", "trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions from displacement traces.", "tensor using trend and plunge cc = rot_tensor(cc, pl, tr, 0.) # Return", "Rotate tensor using trend and plunge cc = rot_tensor(cc, pl, tr, 0.) #", "be included in all # copies or substantial portions of the Software. 
#", "trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode.", "NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2", "tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" # Trend and", "average of the anisotropic stifness matrix to the bulk modulus K and the", "Copy stats from stream str_stats = st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data))", "= trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz = cf.baz # Copy", "lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set. Set all of the", "(GPa /density)\\ (shape ``(3, 3, 3, 3)``) * rho (float): Density (kg/m^3) \"\"\"", "tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp", "module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes global model variables to Fortran", "cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream", "the anisotropic stifness matrix to the bulk modulus K and the shear modulus", "C, rho = es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C,", "OBS-related paramters to Fortran conf obs2for() # Get the Fourier transform of seismograms", "S wave velocities from given bulk (K) and shear (G) moduli and density", "to interact with ``telewavesim`` modules. ''' import sys import itertools import numpy as", "OTHER DEALINGS IN THE # SOFTWARE. ''' Utility functions to interact with ``telewavesim``", "notation for indices \"\"\" if i == j: return i return 6-i-j def", "-tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Get tensor with horizontal axis #", "global variables need to be set for this calculation to succeed. 
This function", "further pass OBS-related paramters to Fortran conf obs2for() # Get the Fourier transform", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "variables to Fortran ``conf`` module. Returns: None Variables to pass are ``a``, ``rho``,", "Exception(\"global variables not all set for OBS case. Set all of the following", "# furnished to do so, subject to the following conditions: # The above", "np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha)", "(a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2", "(km/s) tr (float): Trend angle of symmetry axis (degree) pl (float): Plunge angle", "telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b): \"\"\" Function to generate tensor", "Stacked trace for Stream 1 * stack2 (obspy.trace): Stacked trace for Stream 2", "yy, yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to store displacement", "C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. 
Gvoigt = (C[0,0] + C[1,1] +", "import rotate_ne_rt from telewavesim import conf as cf from telewavesim import elast as", "(GPa) * Greuss (float): Reuss average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill", "S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] +", "tensor cc = voigt2cc(C) return cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\"", "es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C, rho = es.hornblende()", "optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked trace", "``(3, 3, 3, 3)``) \"\"\" # Trend and plunge of symmetry axis tr", "Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam):", "- \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) #", "else: print('\\nFlag not defined: use either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print()", "- pl)*np.pi/180. # Get tensor with horizontal axis # Minerals if typ=='atg': C,", "into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow):", "succeed. This function first checks to make sure the variables are all set", "if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6]))", "elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif", "full 3x3x3x3 tensor representation to the Voigt notation of the stiffness matrix. Args:", "matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt average", "variables not all set for OBS case. 
Set all of the following variables", "pl, tr, 0.) # Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\"", "cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data", "\"\"\" Performs a rotation of the tensor cc (c_ijkl) about three angles (alpha,", "``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for", "Kvoigt (float): Voigt average bulk modulus (GPa) * Gvoigt (float): Voigt average shear", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "or substantial portions of the Software. # THE SOFTWARE IS PROVIDED \"AS IS\",", "------- >>> from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg')", ".. note:: The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the", "print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or not all required global", "def update_stats(tr, nt, dt, slow, baz): \"\"\" Updates the ``stats`` doctionary from an", "(Kvoigt + Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss,", "weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting", "cf.dt, cf.nt, cf.slow, cf.baz] check = [f is None for f in lst]", "sell # copies of the Software, and to permit persons to whom the", "about three angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor with shape", "for this calculation to succeed. 
This function first checks to make sure the", "(obspy.trace): Transverse component vp (float, optional): P-wave velocity used for rotation vs (float,", "Returns: None Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp", "global variables need to be set for this calculation to succeed. This is", "elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif", "Voigt-Reuss-Hill average of the anisotropic stifness matrix to the bulk modulus K and", "np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables", "dtype='c')) # Transfer displacement seismograms to an ``obspy`` ``Stream`` object. trxyz = get_trxyz(yx,", "\\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] +", "(SH) wave mode \"\"\" # Copy traces trP = trZ.copy() trV = trR.copy()", "the shear modulus G. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns:", "calculation to succeed. This function first checks to make sure the variables are", "# # c_ijkl ---> c_mnrs # for m in range(3): for n in", "to be set for this calculation to succeed. This is typically ensured through", "note:: The ``conf`` global variables need to be set for this calculation to", "Returns: None: Parameters are now global variables shared between all other modules \"\"\"", "Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) # Extract P", "3)``) * rho (float): Density (kg/m^3) \"\"\" # Trend and plunge of symmetry", "+ 2.*C[1,2])/9. 
Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2]", "set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins or fl[j] in rocks: cc,", "(75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C)", "is None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all", "in st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2", "vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr,", "this calculation. .. note:: The ``conf`` global variables need to be set for", "(bool, optional): Whether or not the analysis is done for an OBS stations", "trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data)", "range(3): for j in range(3): for k in range(3): for l in range(3):", "Stream containing 3-component displacement seismograms \"\"\" # Check if all variables are set.", "shear modulus G. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple):", "Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\", "domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in", "utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs =", "OR OTHER DEALINGS IN THE # SOFTWARE. ''' Utility functions to interact with", "def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions from displacement traces. 
Args:", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "stack1 (obspy.trace): Stacked trace for Stream 1 * stack2 (obspy.trace): Stacked trace for", "this permission notice shall be included in all # copies or substantial portions", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ''' Utility functions", "3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1]", "2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] -", "elif typ=='ms': C, rho = es.muscovite() elif typ=='ol': C, rho = es.olivine() elif", "return def check_cf(obs=False): \"\"\" Checks whether or not all required global variables are", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "= (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up matrix elements AA = (a*1.e3", "= cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22", "j in range(3): for k in range(3): for l in range(3): rr =", "files (the \"Software\"), to deal # in the Software without restriction, including without", "Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Get displacements in", "# for m in range(3): for n in range(3): for r in range(3):", "cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f is None for", "(float): Reuss average shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus", "``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to", "(float): Angle in radians gam (float): Angle in radians Returns: (np.ndarray): aa: Rotated", "-LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) + (AA + LL)*(CC", "cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep',", "through the conf module: 'dp', 'c', 'rhof'\") 
def model2for(): \"\"\" Passes global model", "seismograms into ``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream`` object. Args: ux", "Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``) * rho (float): Density", "Whether to rotate from Z-R-T coordinate system to P-SV-SH wave mode Returns: (obspy.stream):", "+ 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1]", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data =", "of the symmetry axis. Args: a (float): P-wave velocity (km/s) b (float): S-wave", "typ=='dol': C, rho = es.dolomite() elif typ=='ep': C, rho = es.epidote() elif typ=='grt':", "= np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i]", "def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt notation for indices \"\"\"", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy();", "tuple containing: * trP (obspy.trace): Compressional (P) wave mode * trV (obspy.trace): Vertically", "(GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill", "['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol',", "tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz", "the trend and plunge of the symmetry axis. 
Args: a (float): P-wave velocity", "+ 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1]", "typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif typ=='ms':", "(GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple containing: * Vp (float): P-wave", "object. trxyz = get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\"", "typ=='ep': C, rho = es.epidote() elif typ=='grt': C, rho = es.garnet() elif typ=='gln':", "tensor representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc:", "= np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2: tmp2 += tr.data", "angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor with shape ``(3, 3,", "parameters from file that are passed through the configuration module ``conf``. Returns: None:", "of the anisotropic stifness matrix to the bulk modulus K and the shear", "rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp", "to generate tensor for isotropic material. Args: a (float): P-wave velocity (km/s) b", "trend and plunge of the symmetry \\ axis. Args: tr (float): Trend angle", "Stacked trace for Stream 2 \"\"\" print() print('Stacking ALL traces in streams') #", "Note that the sequence of the rotation is important: (AB ~= BA). In", "ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data)", "* trH (obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\" # Copy traces", "cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi", "set for this calculation to succeed. 
This is typically ensured through reading of", "rho = es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C, rho", "Time in seconds Example ------- >>> from telewavesim import conf >>> from telewavesim", "Function to run the ``plane`` module and return 3-component seismograms as an ``obspy``", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Get tensor with", "elif typ=='jade': C, rho = es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif", "rho = es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif typ=='zo': C, rho", "Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0,", "- da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN", "c_ijkl ---> c_mnrs # for m in range(3): for n in range(3): for", "this calculation to succeed. This is typically ensured through reading of the model", "etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp =", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr", "copyright notice and this permission notice shall be included in all # copies", "== j: return i return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation", "\"\"\" if i == j: return i return 6-i-j def voigt2cc(C): \"\"\" Convert", "n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a", "elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif", "hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) #", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", 
"cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model parameters from file", "es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif typ=='ms': C, rho = es.muscovite()", "``plane`` module depending on land or OBS case. if obs: # If OBS,", "3)``) \"\"\" a = a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b) #", "np.zeros((3,3,3,3), dtype=float) for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i", "rho = es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho", "analysis is done for an OBS case or not. :raises ExceptionError: Throws ExceptionError", "voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge cc = rot_tensor(cc, pl, tr,", "full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i, Voigt_j]", "OBS case. Set all of the following variables through the conf module: 'dp',", "rotate about x_2 first, x_3 second and x_1 third. For trend and plunge", "Returns: None Variables to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx =", "t1 = 0. 
for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 =", "1), (2, 2), (1, 2), (0, 2), (0, 1)] tol = 1e-3 cc", "= (C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2] +", "in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso':", "pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace):", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "Trace with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow tr.stats.baz =", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH':", "ztr = trxyz[2] baz = cf.baz # Copy to radial and transverse rtr", "+ da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC", "= (b*1.e3)*ani/100. # Set up matrix elements AA = (a*1.e3 - da/2.)**2 CC", "rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2]", "\"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx))", "global model variables to Fortran ``conf`` module. Returns: None Variables to pass are", "r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) # Extract P and", "(shape ``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt average bulk", "required global variables are set and throws an Exception if not. Args: obs", "\"\"\" Passes global OBS-related variables to Fortran ``conf`` module. Returns: None Variables to", "part of Telewavesim. 
# Permission is hereby granted, free of charge, to any", "# Compliance matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] +", "Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S wave", "in streams') # Copy stats from stream str_stats = st1[0].stats # Initialize arrays", "a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) tr (float): Trend", "- db/2.)**2 AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA +", "be set for this calculation to succeed. This function first checks to make", "3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs", "yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an ``obspy``", "'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho =", "'ol', 'opx', 'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ',", "an OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" #", "m22]]) # Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec", "set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif", "= pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp))))", "Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3,", "ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component", "symmetry axis (degree) pl (float): Plunge angle of symmetry axis (degree) ani (float):", "raise Exception(\"global variables not all set for OBS case. 
Set all of the", "(np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\"", "Extract East, North and Vertical ntr = trxyz[0] etr = trxyz[1] ztr =", "Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9.", "= np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] =", "K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2", "stiffness matrix to the full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix", "elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif", "Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of", "[cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f is None", "(GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" # Trend and plunge", "cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt',", "set. Set all of the following variables through the conf module: 'a', 'rho',", "are now global variables shared between all other modules \"\"\" h = [];", "to the following conditions: # The above copyright notice and this permission notice", "print('type of mineral/rock not implemented') return # Convert Voigt to full tensor cc", "Args: K (float): Bulk modulus (GPa) G (float): Shear modulus (GPa) rho (float):", "2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay))", "through model. The bottom layer is irrelevant in this calculation. .. note:: The", "tensor with horizontal axis # Minerals if typ=='atg': C, rho = es.antigorite() elif", "irrelevant in this calculation. .. 
note:: The ``conf`` global variables need to be", "Conversion of tensor to Voigt notation for indices \"\"\" if i == j:", "(obspy.trace): Vertically polarized shear (SV) wave mode * trH (obspy.trace): Horizontally polarized shear", "= np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) #", "(float): S-wave velocity (m/s) Example ------- >>> from telewavesim import utils >>> cc,", "are used obs (bool, optional): Whether or not the analysis is done for", "range(3): for n in range(3): for r in range(3): for s in range(3):", "# Percent anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up matrix", "Update trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy,", "stop check_cf(obs) # Pass variables to Fortran conf model2for() wave2for() # Run the", "displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz))", "# Voigt averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] +", "2), (1, 2), (0, 2), (0, 1)] tol = 1e-3 cc = np.asarray(cc)", "component vp (float, optional): P-wave velocity used for rotation vs (float, optional): S-wave", "(float): S-wave velocity (km/s) tr (float): Trend angle of symmetry axis (degree) pl", "matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3,", "= (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. 
Gvoigt", "pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data", "tfs = Stream(traces=[tfr, tft]) # Return stream return tfs def update_stats(tr, nt, dt,", "tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2))", "AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC +", "variables are all set before executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book,", "* trV (obspy.trace): Vertically polarized shear (SV) wave mode * trH (obspy.trace): Horizontally", "cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int')", "check_cf(obs) # Pass variables to Fortran conf model2for() wave2for() # Run the ``plane``", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "numpy as np import pyfftw from scipy.signal import hilbert from obspy.core import Trace,", "optional): Type of elastic material Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic", "3, 3)`` .. note:: The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation", "= b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt to full tensor cc", "first checks to make sure the variables are all set before executing the", "# Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1))", "notation of the stiffness matrix. 
Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3,", "(P) wave mode * trV (obspy.trace): Vertically polarized shear (SV) wave mode *", "from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C", "cc = np.asarray(cc) C = np.zeros((6,6)) for i in range(6): for j in", "seismograms \"\"\" # Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy =", "cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins", "(float): Voigt average shear modulus (GPa) * Kreuss (float): Reuss average bulk modulus", "``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns:", "= (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot = np.array([[-m11,", "Args: a (np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)`` alpha (float):", "Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2]", "return # Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor", "[]; b = []; fl = []; ani = []; tr = [];", "The \\ tensor is rotated using the trend and plunge of the symmetry", "Gvrh = (Gvoigt + Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def", "j, k, l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from", "import conf >>> from telewavesim import utils >>> import numpy as np >>>", "to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing Radial and Transverse transfer", "defined: use either \"iso\", \"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return def", "an Exception if not. 
Args: obs (bool, optional): Whether the analysis is done", "pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz", "(a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2", "correspond to rotation about the x_2, x_3, x_1 axes. Note that the sequence", "tuple containing: * stack1 (obspy.trace): Stacked trace for Stream 1 * stack2 (obspy.trace):", "telewavesim import utils >>> import numpy as np >>> cc, rho = utils.set_aniso_tensor(0.,", "global OBS-related variables to Fortran ``conf`` module. Returns: None Variables to pass are", "from the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow (float): Slowness", "(int): Number of samples dt (float): Sampling rate slow (float): Slowness value (s/km)", "[] # Read file line by line and populate lists try: open(modfile) except:", "rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ,", "cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f is None for f in", "Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects and then an ``obspy``", "elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else:", "cf.rho = r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals", "Voigt notation of the stiffness matrix. 
Args: cc (np.ndarray): Elastic tensor (shape ``(3,", "anisotropic stifness matrix to the bulk modulus K and the shear modulus G.", "cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def", "matrix elements AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL", "i in range(6): for j in range(6): k, l = Voigt_notation[i] m, n", "``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho =", "cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" #", "`P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component trT", "header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt,", "C, rho = es.epidote() elif typ=='grt': C, rho = es.garnet() elif typ=='gln': C,", "Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh", "['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if", "cf.baz # Copy to radial and transverse rtr = ntr.copy() ttr = etr.copy()", "pl, ani): \"\"\" Function to generate tensor for transverse isotropy. The tensor is", "Stream 2 \"\"\" print() print('Stacking ALL traces in streams') # Copy stats from", "Slowness value (s/km) Returns: (float): t1: Time in seconds Example ------- >>> from", "cc: Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C) cc", "nt (int): Number of samples dt (float): Sampling rate slow (float): Slowness value", "mode * trV (obspy.trace): Vertically polarized shear (SV) wave mode * trH (obspy.trace):", "configuration module ``conf``. Returns: None: Parameters are now global variables shared between all", "is # furnished to do so, subject to the following conditions: # The", "x_1 third. 
For trend and plunge of symmetry axis (e.g., tri_tensor): ``alpha`` =", "cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function", "in all # copies or substantial portions of the Software. # THE SOFTWARE", "stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates", "pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif", "function. Args: fortran (book, option): Whether or not the Fortran modules are used", "modules. ''' import sys import itertools import numpy as np import pyfftw from", "= a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt to", "Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2", "cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557,", "da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC =", "object. Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram uz", "Get tensor with horizontal axis # Minerals if typ=='atg': C, rho = es.antigorite()", "as es from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import plane as", ">>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>> conf.thickn[0] = 10. 
>>>", "Trace object to update nt (int): Number of samples dt (float): Sampling rate", "pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform of seismograms for ``land`` case", "Software is # furnished to do so, subject to the following conditions: #", "np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs", "time through model. The bottom layer is irrelevant in this calculation. .. note::", "not. :raises ExceptionError: Throws ExceptionError if not all variables are set. \"\"\" lst", "np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg',", "a = pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs", "(shape ``(3, 3, 3, 3)``) \"\"\" a = a*1.e3 b = b*1.e3 C", "C, rho = es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C,", "``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt average bulk modulus", "= trR.copy() trH = trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow)", "+ LL)) # eta = FF/(AA - 2.*LL) # Get tensor with horizontal", "axes. Note that the sequence of the rotation is important: (AB ~= BA).", "+= cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0", "Whether or not the analysis is done for an OBS stations Returns: (obspy.stream):", "plunge cc = rot_tensor(cc, pl, tr, 0.) 
# Return tensor return cc, rho", "(K) and shear (G) moduli and density (rho) in kg/m^3 Args: K (float):", "Minerals if typ=='atg': C, rho = es.antigorite() elif typ=='bt': C, rho = es.biotite()", "Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow =", "seismograms for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the", "return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T`", "6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation of the stiffness matrix to", "trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz = cf.baz # Copy to", "- \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0]", "of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\" rot", "(SV) wave mode * trH (obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\"", "# Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho =", "= []; b = []; fl = []; ani = []; tr =", "import hilbert from obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim", "3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] +", "Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P", "cf.nlay = len(h) cf.thickn = h cf.rho = r cf.isoflg = fl cf.a", "from given bulk (K) and shear (G) moduli and density (rho) in kg/m^3", "of symmetry axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. 
# Percent", "es.muscovite() elif typ=='ol': C, rho = es.olivine() elif typ=='opx': C, rho = es.orthopyroxene()", "+ LL)*(CC + LL)) # eta = FF/(AA - 2.*LL) # Get tensor", "utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S =", "obs2for() # Get the Fourier transform of seismograms for ``obs``case yx, yy, yz", "es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif typ=='ep': C, rho = es.epidote()", "G (float): Shear modulus (GPa) rho (float): Density (kg/m^3) Returns: (tuple): tuple containing:", "# Get the Fourier transform of seismograms for ``land`` case yx, yy, yz", "range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j,", "passed through the configuration module ``conf``. Returns: None: Parameters are now global variables", "tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr", "rho = es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho", "the stiffness matrix. Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``)", "symmetry axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa", "Example ------- >>> from telewavesim import conf >>> from telewavesim import utils >>>", "yy, yz): \"\"\" Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects and", "3, 3, 3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i,", "permission notice shall be included in all # copies or substantial portions of", "C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C,", "the variables are all set before executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran", "# Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. 
Gvrh = (Gvoigt + Greuss)/2.", "\"\"\" # Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy))", "typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB':", "(obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow", "(alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor with shape ``(3, 3, 3,", "stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function", "horizontal axis cc = es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor using", "aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates", "= (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL)", "rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2", "in cartesian coordinate system pvh (bool, optional): Whether to rotate from Z-R-T coordinate", "= []; pl = [] # Read file line by line and populate", "Function to generate tensor for transverse isotropy. 
The tensor is rotated using the", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "P-wave velocity (km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor", "mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S wave velocities from given bulk", "shape ``(3, 3, 3, 3)`` alpha (float): Angle in radians beta (float): Angle", "obs: lst = [cf.dp, cf.c, cf.rhof] check = [f is None for f", "cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz,", "dtype=float) for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i =", "Fourier transform of seismograms for ``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c'))", "\"\"\" # Compliance matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0]", "reading of the model file from the function ``utils.read_model(modfile)``, and setting the variable", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "important: (AB ~= BA). In this case we rotate about x_2 first, x_3", "if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr,", "S = np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2]", "set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag not defined: use either", "wave2for() # Run the ``plane`` module depending on land or OBS case. 
if", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "Convert from the full 3x3x3x3 tensor representation to the Voigt notation of the", "= Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time through", "through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if", "K and the shear modulus G. Args: C (np.ndarray): Stiffness matrix (shape ``(6,", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "* Kreuss (float): Reuss average bulk modulus (GPa) * Greuss (float): Reuss average", "optional): P-wave velocity used for rotation vs (float, optional): S-wave velocity used for", "Vs (float): S-wave velocity (m/s) Example ------- >>> from telewavesim import utils >>>", "trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to", "cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui", "da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. 
# Set up matrix elements AA =", "OBS, then further pass OBS-related paramters to Fortran conf obs2for() # Get the", "angle of symmetry axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic", "# Get tensor with horizontal axis # Minerals if typ=='atg': C, rho =", "\"\"\" # Copy traces trP = trZ.copy() trV = trR.copy() trH = trT.copy()", "typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade':", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "file line by line and populate lists try: open(modfile) except: raise(Exception('model file cannot", "'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j]", "import conf as cf from telewavesim import elast as es from telewavesim.rmat_f import", "'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof] check", "in two ``Stream`` objects. Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2", "(1, 2), (0, 2), (0, 1)] tol = 1e-3 cc = np.asarray(cc) C", "l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k,", "cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz =", "typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m':", "(shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6, 6)``)", "bulk (K) and shear (G) moduli and density (rho) in kg/m^3 Args: K", "stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in two ``Stream`` objects. 
Args: st1", "is useful for travel time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] =", "etr = trxyz[1] ztr = trxyz[2] baz = cf.baz # Copy to radial", "-np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz =", "-np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha)", "Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c =", "trZ.copy() trV = trR.copy() trH = trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp", "uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux) tuy = Trace(data=uy)", "np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) + (AA + LL)*(CC + LL))", "interact with ``telewavesim`` modules. ''' import sys import itertools import numpy as np", "is done for an OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement", "to stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\"", "back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def", "= rot_tensor(cc, pl, tr, 0.) # Return tensor return cc, rho def full_3x3_to_Voigt_6_index(i,", "transverse rtr = ntr.copy() ttr = etr.copy() # Rotate to radial and transverse", "layer is irrelevant in this calculation. .. note:: The ``conf`` global variables need", "(float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus", "Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer", "variables not all set. 
Set all of the following variables through the conf", "np.zeros((6,6)) for i in range(6): for j in range(6): k, l = Voigt_notation[i]", "telewavesim import conf as cf from telewavesim import elast as es from telewavesim.rmat_f", "minerals. The \\ tensor is rotated using the trend and plunge of the", "axis (degree) pl (float): Plunge angle of symmetry axis (degree) ani (float): Percent", "',modfile)) with open(modfile) as fileobj: for line in fileobj: if not line.rstrip().startswith('#'): model", "= Trace(data=uy) tuz = Trace(data=uz) # Update trace header tux = update_stats(tux, cf.nt,", "tr, pl, ani): \"\"\" Function to generate tensor for transverse isotropy. The tensor", "Parameters are now global variables shared between all other modules \"\"\" h =", "= 0. for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i]", "Software. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "modulus G. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple", "cannot be opened: ',modfile)) with open(modfile) as fileobj: for line in fileobj: if", "for k in range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum =", "(tuple): tuple containing: * trP (obspy.trace): Compressional (P) wave mode * trV (obspy.trace):", "Store in traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) #", "raise(Exception('model file cannot be opened: ',modfile)) with open(modfile) as fileobj: for line in", "for line in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2]))", "rotation is important: (AB ~= BA). In this case we rotate about x_2", "global variables shared between all other modules \"\"\" h = []; r =", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "trend and plunge of the symmetry axis. 
Args: a (float): P-wave velocity (km/s)", "def check_cf(obs=False): \"\"\" Checks whether or not all required global variables are set", "cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt,", "k, l in itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j =", "j): \"\"\" Conversion of tensor to Voigt notation for indices \"\"\" if i", "for j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif", "modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>>", "get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to store", "6)``) Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt average bulk modulus (GPa)", "OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Check", "tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions", "checks to make sure the variables are all set before executing the main", "Gvoigt (float): Voigt average shear modulus (GPa) * Kreuss (float): Reuss average bulk", "symmetry \\ axis. Args: tr (float): Trend angle of symmetry axis (degree) pl", "plunge of the symmetry axis. Args: a (float): P-wave velocity (km/s) b (float):", "# Get tensor with horizontal axis cc = es.tri_tensor(AA, CC, FF, LL, NN)", "# SOFTWARE. ''' Utility functions to interact with ``telewavesim`` modules. 
''' import sys", "rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336,", "(tuple): Tuple containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3,", "* cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``) *", "(slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return", "(bool, optional): Whether the analysis is done for an OBS case or not.", "r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex)", "bulk modulus (GPa) * Gvoigt (float): Voigt average shear modulus (GPa) * Kreuss", "+ \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2]", "b): \"\"\" Function to generate tensor for isotropic material. 
Args: a (float): P-wave", "= np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] =", "stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Check if", "= es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C, rho =", "= es.glaucophane() elif typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C, rho =", "Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3,", "cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness", "aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation", "tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow,", "Args: obs (bool, optional): Whether the analysis is done for an OBS case", "weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 =", "This is typically ensured through reading of the model file from the function", "cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def", "time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store", "elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j] in mins or", "= mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0", "Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend", "not all set for OBS case. 
Set all of the following variables through", "= np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] =", "\"\"\" h = []; r = []; a = []; b = [];", "used obs (bool, optional): Whether or not the analysis is done for an", "components trP.data = vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. return trP, trV,", "to Fortran ``conf`` module. Returns: None Variables to pass are ``dt``, ``slow``, ``baz``", "Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical", "note:: The three angles (``alpha``, ``beta``, ``gam``) correspond to rotation about the x_2,", "ExceptionError: Throws ExceptionError if not all variables are set. \"\"\" lst = [cf.a,", "conf >>> from telewavesim import utils >>> import numpy as np >>> cc,", "-tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. db", "from telewavesim import conf >>> from telewavesim import utils >>> import numpy as", "Voigt representation of the stiffness matrix to the full 3x3x3x3 tensor representation. Args:", "The bottom layer is irrelevant in this calculation. .. note:: The ``conf`` global", "to update nt (int): Number of samples dt (float): Sampling rate slow (float):", "C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho = es.dolomite() elif typ=='ep': C,", "= es.antigorite() elif typ=='bt': C, rho = es.biotite() elif typ=='cpx': C, rho =", ">>> import numpy as np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>>", "typ=='LHZ': C, rho = es.lherzolite() else: print('type of mineral/rock not implemented') return #", "pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data", "is important: (AB ~= BA). 
In this case we rotate about x_2 first,", "cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] =", "optional): Whether the analysis is done for an OBS case or not. :raises", "Calculates total propagation time through model. The bottom layer is irrelevant in this", "np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into traces stack1 = Trace(data=weight1*tmp1,header=str_stats) stack2", "C, rho = es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl': C,", "asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR,", "and throws an Exception if not. Args: obs (bool, optional): Whether the analysis", "(float): Angle in radians beta (float): Angle in radians gam (float): Angle in", "tensor to Voigt notation for indices \"\"\" if i == j: return i", "typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif typ=='EC_f':", "asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6.,", "# print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV,", "Kvrh = (Kvoigt + Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. return Kvoigt, Gvoigt,", "= np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 -", "model2for(): \"\"\" Passes global model variables to Fortran ``conf`` module. Returns: None Variables", "run the ``plane`` module and return 3-component seismograms as an ``obspy`` ``Stream`` object.", "(``alpha``, ``beta``, ``gam``) correspond to rotation about the x_2, x_3, x_1 axes. 
Note", "C, rho = es.zoisite() # Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif", "file that are passed through the configuration module ``conf``. Returns: None: Parameters are", "is rotated using the trend and plunge of the symmetry axis. Args: a", "- (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2)", "baz): \"\"\" Updates the ``stats`` doctionary from an obspy ``Trace`` object. Args: tr", "rot_tensor(cc, pl, tr, 0.) # Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'):", "return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic stifness", "ExceptionError if not all variables are set. \"\"\" lst = [cf.a, cf.rho, cf.thickn,", "cf.rhof def run_plane(obs=False): \"\"\" Function to run the ``plane`` module and return 3-component", "``Trace`` object. Args: tr (obspy.trace): Trace object to update nt (int): Number of", "Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C:", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put", "b = []; fl = []; ani = []; tr = []; pl", "obs2for(): \"\"\" Passes global OBS-related variables to Fortran ``conf`` module. Returns: None Variables", "phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2", "SV components trP.data = vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. return trP,", "(float): Density (kg/m^3) \"\"\" # Trend and plunge of symmetry axis tr =", "return trxyz def tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions from displacement", "East, North and Vertical ntr = trxyz[0] etr = trxyz[1] ztr = trxyz[2]", "``Trace`` obsjects and then an ``obspy`` ``Stream`` object. 
Args: ux (np.ndarray): x-component displacement", "b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or", "is irrelevant in this calculation. .. note:: The ``conf`` global variables need to", "ALL traces in streams') # Copy stats from stream str_stats = st1[0].stats #", "all variables are set. If not, throw an Exception and stop check_cf(obs) #", "C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j, k, l in", "(np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C)", "following conditions: # The above copyright notice and this permission notice shall be", "(tuple): tuple containing: * stack1 (obspy.trace): Stacked trace for Stream 1 * stack2", "to the bulk modulus K and the shear modulus G. Args: C (np.ndarray):", "averaging Kvoigt = (C[0,0] + C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] +", "that are passed through the configuration module ``conf``. Returns: None: Parameters are now", "tux = Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) # Update trace header", "np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': #", "(float): Slowness value (s/km) Returns: (float): t1: Time in seconds Example ------- >>>", "and then an ``obspy`` ``Stream`` object. Args: ux (np.ndarray): x-component displacement seismogram uy", "C, rho = es.lherzolite() else: print('type of mineral/rock not implemented') return # Convert", "4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] +", "s in range(3): asum=0.0 for i in range(3): for j in range(3): for", "the configuration module ``conf``. 
Returns: None: Parameters are now global variables shared between", "range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc =", "CC, FF, LL, NN) # Rotate tensor using trend and plunge cc =", "(obspy.trace): Compressional (P) wave mode * trV (obspy.trace): Vertically polarized shear (SV) wave", "ntr = trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz = cf.baz #", "representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic", "pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif", "# Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate", "typ='atg') >>> # Define two-layer model model with identical material >>> conf.nlay =", "cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] =", "Voigt_j] return cc def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor representation", "elements AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL =", "from an obspy ``Trace`` object. 
Args: tr (obspy.trace): Trace object to update nt", "\"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check =", "t1 def read_model(modfile): \"\"\" Reads model parameters from file that are passed through", "if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j]) cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j])", "cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", ">>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs", "axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Percent anisotropy da", "rotate from Z-R-T coordinate system to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream", "def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor cc (c_ijkl) about three", "VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic stifness matrix to the", "- 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12], [-m21,", "tr.data hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize", "cf.baz def obs2for(): \"\"\" Passes global OBS-related variables to Fortran ``conf`` module. 
Returns:", "containing 3-component displacement seismograms \"\"\" # Get displacements in time domain ux =", "= pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function", "np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta)", "used for rotation vs (float, optional): S-wave velocity used for rotation Returns: (tuple):", "= voigt2cc(C)*1.e9/rho # Rotate tensor using trend and plunge cc = rot_tensor(cc, pl,", "= np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P':", "first, x_3 second and x_1 third. For trend and plunge of symmetry axis", "not. Args: obs (bool, optional): Whether the analysis is done for an OBS", "and plunge of the symmetry \\ axis. Args: tr (float): Trend angle of", "object. 
Args: tr (obspy.trace): Trace object to update nt (int): Number of samples", "= [cf.dp, cf.c, cf.rhof] check = [f is None for f in lst]", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs =", "# Define two-layer model model with identical material >>> conf.nlay = 2 >>>", "(obspy.trace): Radial component trT (obspy.trace): Transverse component vp (float, optional): P-wave velocity used", "voigt2cc(C): \"\"\" Convert the Voigt representation of the stiffness matrix to the full", "a0**2 b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si'", "(obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking", "cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi", "cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to run the", "the isotropic P and S wave velocities from given bulk (K) and shear", "tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix (shape ``(6,", "sum(check)/len(check)>0.: raise Exception(\"global variables not all set. Set all of the following variables", "Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" # Trend", "all set for OBS case. Set all of the following variables through the", "or OBS case. if obs: # If OBS, then further pass OBS-related paramters", "matrix. 
Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray):", "FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) + (AA", "'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc =", "``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def", "1 def wave2for(): \"\"\" Passes global wavefield variables to Fortran ``conf`` module. Returns:", "and plunge cc = rot_tensor(cc, pl, tr, 0.) # Return tensor return cc", "cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" a", "G. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing:", "# in the Software without restriction, including without limitation the rights # to", "cf.nt, cf.slow, cf.baz] check = [f is None for f in lst] if", "of symmetry axis (degree) pl (float): Plunge angle of symmetry axis (degree) type", "in range(3): for k in range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l]", "cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl',", "full 3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns:", "total propagation time through model. The bottom layer is irrelevant in this calculation.", "two-layer model model with identical material >>> conf.nlay = 2 >>> conf.a =", "\"\"\" Function to generate tensor for anisotropic minerals. The \\ tensor is rotated", "\"\"\" print() print('Stacking ALL traces in streams') # Copy stats from stream str_stats", "+ 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt +", "rho = es.muscovite() elif typ=='ol': C, rho = es.olivine() elif typ=='opx': C, rho", "stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation time through model. 
The bottom", "elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif", "3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j, k,", "return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation of the stiffness matrix", "Passes global wavefield variables to Fortran ``conf`` module. Returns: None Variables to pass", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "np.asarray(cc) C = np.zeros((6,6)) for i in range(6): for j in range(6): k,", "or not the analysis is done for an OBS stations Returns: (obspy.stream): trxyz:", "radians Returns: (np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3, 3)`` ..", "variables need to be set for this calculation to succeed. This is typically", "variables need to be set for this calculation to succeed. This function first", "trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] =", "a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i]", "= Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C):", "pl (float): Plunge angle of symmetry axis (degree) ani (float): Percent anisotropy Returns:", "+ C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4]", "es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C, rho = es.lizardite()", "configuration parameters cf.nlay = len(h) cf.thickn = h cf.rho = r cf.isoflg =", "following variables through the conf module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes", "= es.jadeite() elif typ=='lws': C, rho = es.lawsonite() elif typ=='lz': C, rho =", "numpy as np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define", "Define two-layer model model with identical material >>> conf.nlay = 2 >>> conf.a", "Slowness value (s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with", "Elastic 
tensor with shape ``(3, 3, 3, 3)`` alpha (float): Angle in radians", "for i in range(3): for j in range(3): for k in range(3): for", "# Store in traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz)", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "displacement seismograms \"\"\" # Get displacements in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy", "for this calculation to succeed. This is typically ensured through reading of the", "typ=='zo': C, rho = es.zoisite() # Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic()", "n in range(3): for r in range(3): for s in range(3): asum=0.0 for", "uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces tux = Trace(data=ux)", "anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3,", "(kg/m^3) \"\"\" # Trend and plunge of symmetry axis tr = -tr*np.pi/180. pl", "tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow,", "and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a = pyfftw.empty_aligned(len(rtr.data), dtype='float') #", "following variables through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow',", "# Read file line by line and populate lists try: open(modfile) except: raise(Exception('model", "stifness matrix to the bulk modulus K and the shear modulus G. 
Args:", "es.antigorite() elif typ=='bt': C, rho = es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92()", "= [cf.a, cf.rho, cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f is", "cf.slow, cf.baz) tuy = update_stats(tuy, cf.nt, cf.dt, cf.slow, cf.baz) tuz = update_stats(tuz, cf.nt,", "(km/s) b (float): S-wave velocity (km/s) tr (float): Trend angle of symmetry axis", "np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl", "propagation time through model. The bottom layer is irrelevant in this calculation. ..", "cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor for anisotropic minerals.", ":raises ExceptionError: Throws ExceptionError if not all variables are set. \"\"\" lst =", "np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta)", "# Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] +", "in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set for OBS case.", "# Pass configuration parameters cf.nlay = len(h) cf.thickn = h cf.rho = r", "cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global", ">>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp", "radial and transverse rtr = ntr.copy() ttr = etr.copy() # Rotate to radial", "(GPa) * Kreuss (float): Reuss average bulk modulus (GPa) * Greuss (float): Reuss", "tol = 1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for i in range(6):", "containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``)", "for transverse isotropy. 
The tensor is rotated using the trend and plunge of", "6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2), (0,", "the model file from the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args:", "VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if cf.wvtype=='P':", "import pyfftw from scipy.signal import hilbert from obspy.core import Trace, Stream from obspy.signal.rotate", "typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37':", "# Transfer displacement seismograms to an ``obspy`` ``Stream`` object. trxyz = get_trxyz(yx, yy,", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "Whether or not the Fortran modules are used obs (bool, optional): Whether or", "'c', 'rhof'\") def model2for(): \"\"\" Passes global model variables to Fortran ``conf`` module.", "module >>> # Only topmost layer is useful for travel time calculation >>>", "# Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex)", "axis (degree) pl (float): Plunge angle of symmetry axis (degree) type (str, optional):", "Density (kg/m^3) Returns: (tuple): tuple containing: * Vp (float): P-wave velocity (m/s) *", "tensor with horizontal axis cc = es.tri_tensor(AA, CC, FF, LL, NN) # Rotate", "'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso':", "the ``plane`` module depending on land or OBS case. 
if obs: # If", "cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho)", "implemented') return # Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate", "= np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt',", "= Trace(data=uz) # Update trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow, cf.baz)", "from obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf", "(book, option): Whether or not the Fortran modules are used obs (bool, optional):", "rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for", "``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def", "- \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "then an ``obspy`` ``Stream`` object. Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray):", "need to be set for this calculation to succeed. 
This is typically ensured", "from the full 3x3x3x3 tensor representation to the Voigt notation of the stiffness", "Angle in radians beta (float): Angle in radians gam (float): Angle in radians", "return i return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation of the", "da/2.)**2 CC = (a*1.e3 + da/2.)**2 LL = (b*1.e3 + db/2.)**2 NN =", "trxyz[1] ztr = trxyz[2] baz = cf.baz # Copy to radial and transverse", "cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif", "as cf from telewavesim import elast as es from telewavesim.rmat_f import conf as", "tensor with shape ``(3, 3, 3, 3)`` alpha (float): Angle in radians beta", "for rotation vs (float, optional): S-wave velocity used for rotation Returns: (tuple): tuple", "'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo']", "2 pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1", "set for OBS case. Set all of the following variables through the conf", "file is part of Telewavesim. # Permission is hereby granted, free of charge,", "= np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for m", "symmetry axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Get tensor", "(0, 1)] tol = 1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for i", "typ=='atg': C, rho = es.antigorite() elif typ=='bt': C, rho = es.biotite() elif typ=='cpx':", "Exception(\"global variables not all set. Set all of the following variables through the", "paramters to Fortran conf obs2for() # Get the Fourier transform of seismograms for", "Stream(traces=[tfr, tft]) # Return stream return tfs def update_stats(tr, nt, dt, slow, baz):", "the symmetry \\ axis. 
Args: tr (float): Trend angle of symmetry axis (degree)", "= weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1", "C, rho = es.muscovite() elif typ=='ol': C, rho = es.olivine() elif typ=='opx': C,", "Radial component trT (obspy.trace): Transverse component vp (float, optional): P-wave velocity used for", "C = es.iso_tensor(a, b) # Convert Voigt to full tensor cc = voigt2cc(C)", "cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``) * rho", "= cc cf.rho[j] = rho else: print('\\nFlag not defined: use either \"iso\", \"tri\"", "conf module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes global model variables to", "dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] =", "range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l]", "(np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``) Returns: (np.ndarray): C: Stiffness matrix", "cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to run the ``plane`` module", "Permission is hereby granted, free of charge, to any person obtaining a copy", "in this calculation. .. note:: The ``conf`` global variables need to be set", "Get the Fourier transform of seismograms for ``land`` case yx, yy, yz =", "= np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex)", "conf.rho[0] = rho >>> conf.thickn[0] = 10. 
>>> conf.wvtype = 'P' >>> slow", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "shear modulus (GPa) * Kreuss (float): Reuss average bulk modulus (GPa) * Greuss", "modulus (GPa) * Greuss (float): Reuss average shear modulus (GPa) * Kvrh (float):", "Software without restriction, including without limitation the rights # to use, copy, modify,", "furnished to do so, subject to the following conditions: # The above copyright", "- 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill", "the ``plane`` module and return 3-component seismograms as an ``obspy`` ``Stream`` object. ..", "fl = []; ani = []; tr = []; pl = [] #", "``conf``. Returns: None: Parameters are now global variables shared between all other modules", "substantial portions of the Software. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set for OBS", "cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix m11 =", "``alpha`` = plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3))", "# The above copyright notice and this permission notice shall be included in", "2.*AC*(AA + CC + 2.*LL) + (AA + LL)*(CC + LL)) # eta", "C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the anisotropic stifness matrix", "# of this software and associated documentation files (the \"Software\"), to deal #", "(float): Density (kg/m^3) Returns: (tuple): tuple containing: * Vp (float): P-wave velocity (m/s)", "get_trxyz(yx, yy, yz): \"\"\" Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects", "object to update nt (int): Number of samples dt (float): Sampling rate slow", "rate slow (float): Slowness value (s/km) baz (float): Back-azimuth value (degree) Returns: (obspy.trace):", "range(6): for j in range(6): k, l = Voigt_notation[i] m, n = Voigt_notation[j]", 
"Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total propagation", "(m/s) * Vs (float): S-wave velocity (m/s) Example ------- >>> from telewavesim import", "displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "(float, optional): S-wave velocity used for rotation Returns: (tuple): tuple containing: * trP", "lists try: open(modfile) except: raise(Exception('model file cannot be opened: ',modfile)) with open(modfile) as", "OBS case or not. :raises ExceptionError: Throws ExceptionError if not all variables are", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif", "to radial and transverse rtr = ntr.copy() ttr = etr.copy() # Rotate to", "- 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5])", "``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn", "one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or not", "= -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) + \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs", "2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot", "= np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 
'cpx',", "for anisotropic minerals. The \\ tensor is rotated using the trend and plunge", "the function ``utils.read_model(modfile)``, and setting the variable ``conf.wvtype`` Args: slow (float): Slowness value", "sum(check)/len(check)>0.: raise Exception(\"global variables not all set for OBS case. Set all of", "symmetry axis tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Percent anisotropy", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "= asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR, trT,", "= rot_tensor(cc, pl, tr, 0.) # Return tensor return cc def set_aniso_tensor(tr, pl,", "transform of seismograms for ``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) #", "\"tri\" or one among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether", "``Stream`` object. trxyz = get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy, yz):", "# Convert Voigt to full tensor cc = voigt2cc(C) return cc def set_tri_tensor(a,", "rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2]", "``stats`` doctionary from an obspy ``Trace`` object. Args: tr (obspy.trace): Trace object to", "be set for this calculation to succeed. This is typically ensured through reading", "= ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms',", "make sure the variables are all set before executing the main ``telewavesim.rmat_f.plane_****`` function.", "utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>>", "(obspy.trace): Stacked trace for Stream 1 * stack2 (obspy.trace): Stacked trace for Stream", "for Stream 1 * stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\" print()", "using the trend and plunge of the symmetry \\ axis. 
Args: tr (float):", "= [] # Read file line by line and populate lists try: open(modfile)", "np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex)", "= full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i, Voigt_j] return cc def", "[]; ani = []; tr = []; pl = [] # Read file", "C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. #", "to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow", "Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray):", "utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix", "traces trP = trZ.copy() trV = trR.copy() trH = trT.copy() # Vertical slownesses", "else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 =", "0. 
for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0", "= -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) + (AA +", "a = []; b = []; fl = []; ani = []; tr", "of seismograms for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get", "``(3, 3, 3, 3)``) \"\"\" a = a*1.e3 b = b*1.e3 C =", "for rotation Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional (P) wave mode", "= np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins =", "and plunge of symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend", "tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr", "tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs", "3, 3)``) \"\"\" a = a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b)", "velocities from given bulk (K) and shear (G) moduli and density (rho) in", "whether or not all required global variables are set and throws an Exception", "return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to store displacement seismograms into", "def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor for anisotropic minerals. 
The", "= np.zeros((6,6)) for i in range(6): for j in range(6): k, l =", "= es.lherzolite() else: print('type of mineral/rock not implemented') return # Convert Voigt to", "= es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C, rho =", "cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G =", "line by line and populate lists try: open(modfile) except: raise(Exception('model file cannot be", "Obspy ``Stream`` object in cartesian coordinate system pvh (bool, optional): Whether to rotate", "= rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def", "'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks =", "shear modulus (GPa) Example ------- >>> from telewavesim import utils >>> cc, rho", "j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k, l] = C[Voigt_i, Voigt_j] return", "to rotation about the x_2, x_3, x_1 axes. Note that the sequence of", "es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif typ=='lws': C, rho = es.lawsonite()", "among\\n') print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or not all", "rho = es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C, rho", "rr*a[i,j,k,l] aa[m,n,r,s] = asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\"", "if not. 
Args: obs (bool, optional): Whether the analysis is done for an", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters", "st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data),", "(shape ``(3, 3, 3, 3)``) * rho (float): Density (kg/m^3) \"\"\" # Trend", "= -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\", "tr (float): Trend angle of symmetry axis (degree) pl (float): Plunge angle of", "cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store in stream tfs = Stream(traces=[tfr, tft]) #", "x_2 first, x_3 second and x_1 third. For trend and plunge of symmetry", "(Gvoigt + Greuss)/2. return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\"", "Stream containing 3-component displacement seismograms \"\"\" # Get displacements in time domain ux", "``plane`` module and return 3-component seismograms as an ``obspy`` ``Stream`` object. .. 
note::", "the Fortran modules are used obs (bool, optional): Whether or not the analysis", "axis # Minerals if typ=='atg': C, rho = es.antigorite() elif typ=='bt': C, rho", "for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s]", "raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or not all required global variables", "+= np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if", "# Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec =", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "try: open(modfile) except: raise(Exception('model file cannot be opened: ',modfile)) with open(modfile) as fileobj:", "polarized shear (SH) wave mode \"\"\" # Copy traces trP = trZ.copy() trV", "pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer displacement seismograms to an ``obspy`` ``Stream`` object. trxyz =", "C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. 
# Reuss averaging", "Compliance matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt = (C[0,0] + C[1,1]", "np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha)", "for indices \"\"\" if i == j: return i return 6-i-j def voigt2cc(C):", "C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726,", "cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream trxyz = Stream(traces=[tux, tuy, tuz])", "- (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model parameters from file that", "in range(3): asum=0.0 for i in range(3): for j in range(3): for k", "# Stack all traces for tr in st1: tmp1 += tr.data hilb1 =", "= es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37() elif typ=='SP_80': C, rho =", "+ C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0] + C[1,1]", "dtype=complex) # Stack all traces for tr in st1: tmp1 += tr.data hilb1", "= es.quartz() elif typ=='zo': C, rho = es.zoisite() # Rocks elif typ=='BS_f': C,", "calculation to succeed. 
This is typically ensured through reading of the model file", "= es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif typ=='dol': C, rho =", "Voigt average bulk modulus (GPa) * Gvoigt (float): Voigt average shear modulus (GPa)", "= np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P':", "15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] +", "ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH =", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "import plane as pw_f def set_iso_tensor(a, b): \"\"\" Function to generate tensor for", "CC + 2.*LL) + (AA + LL)*(CC + LL)) # eta = FF/(AA", "cf.a[:,:,:,:,j] = cc elif fl[j]=='tri': cc = set_tri_tensor(a[j],b[j],tr[j],pl[j],ani[j]) cf.a[:,:,:,:,j] = cc elif fl[j]", "of this software and associated documentation files (the \"Software\"), to deal # in", "es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor using trend and plunge cc", "np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha)", "= cc >>> conf.rho[0] = rho >>> conf.thickn[0] = 10. >>> conf.wvtype =", "``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option): Whether or not the Fortran modules are", "and plunge of symmetry axis tr = -tr*np.pi/180. pl = (90. 
- pl)*np.pi/180.", "the analysis is done for an OBS stations Returns: (obspy.stream): trxyz: Stream containing", "and stop check_cf(obs) # Pass variables to Fortran conf model2for() wave2for() # Run", "Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" a =", ">>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\"", "K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp =", "np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2))", "in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else:", "C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" #", "with shape ``(3, 3, 3, 3)`` alpha (float): Angle in radians beta (float):", "traces in streams') # Copy stats from stream str_stats = st1[0].stats # Initialize", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data))", "+ \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] + S[1,1]", "if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH':", "+ \\ np.cos(gam)*np.cos(alpha) # # c_ijkl ---> c_mnrs # for m in range(3):", "es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite()", "set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to generate tensor for transverse isotropy.", "functions to interact with ``telewavesim`` modules. 
''' import sys import itertools import numpy", "if not all variables are set. \"\"\" lst = [cf.a, cf.rho, cf.thickn, cf.isoflg,", "then further pass OBS-related paramters to Fortran conf obs2for() # Get the Fourier", "from Z-R-T coordinate system to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "# c_ijkl ---> c_mnrs # for m in range(3): for n in range(3):", "typ=='hbl': C, rho = es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif typ=='lws':", "objects. Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws (bool, optional):", "np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to the `conf` module", "rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag not defined:", "C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0] +", "module. Returns: None Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt =", "time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] =", "r in range(3): for s in range(3): asum=0.0 for i in range(3): for", "tuz = Trace(data=uz) # Update trace header tux = update_stats(tux, cf.nt, cf.dt, cf.slow,", "transfer functions from displacement traces. 
Args: trxyz (obspy.stream): Obspy ``Stream`` object in cartesian", "cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model model with", "tr: Trace with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow tr.stats.baz", "np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol',", "conf.wvtype = 'P' >>> slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\"", "pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof", "cf.baz) # Append to stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "C, rho = es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C,", "range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc", "Vector of Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot,", "the Software without restriction, including without limitation the rights # to use, copy,", "Whether the analysis is done for an OBS case or not. 
:raises ExceptionError:", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "person obtaining a copy # of this software and associated documentation files (the", "shear modulus (GPa) * Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh", "S-wave velocity used for rotation Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional", "(obspy.stream): tfs: Stream containing Radial and Transverse transfer functions \"\"\" # Extract East,", "= es.hornblende() elif typ=='jade': C, rho = es.jadeite() elif typ=='lws': C, rho =", "db/2.)**2 AC = (a*1.e3)**2 FF = -LL + np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC", "slow (float): Slowness value (s/km) Returns: (float): t1: Time in seconds Example -------", "Get the Fourier transform of seismograms for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype,", "rho = es.epidote() elif typ=='grt': C, rho = es.garnet() elif typ=='gln': C, rho", "hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2: tmp2 += tr.data hilb2 =", "all # copies or substantial portions of the Software. # THE SOFTWARE IS", "yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to store displacement seismograms", "i return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation of the stiffness", "Fortran modules are used obs (bool, optional): Whether or not the analysis is", "modulus (GPa) * Kreuss (float): Reuss average bulk modulus (GPa) * Greuss (float):", "np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr in", "Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional (P) wave mode * trV", "all set. Set all of the following variables through the conf module: 'a',", "# Normalize tmp1 = tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1", "(tuple): Tuple containing: * Kvoigt (float): Voigt average bulk modulus (GPa) * Gvoigt", "axis. 
Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) tr", "persons to whom the Software is # furnished to do so, subject to", "not, throw an Exception and stop check_cf(obs) # Pass variables to Fortran conf", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2),", "= get_trxyz(yx, yy, yz) return trxyz def get_trxyz(yx, yy, yz): \"\"\" Function to", "gam (float): Angle in radians Returns: (np.ndarray): aa: Rotated tensor with shape ``(3,", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "elif typ=='lz': C, rho = es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif", "* stack1 (obspy.trace): Stacked trace for Stream 1 * stack2 (obspy.trace): Stacked trace", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. Gvrh = (Gvoigt +", "cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global wavefield variables to Fortran ``conf``", "of rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 =", "# Rotation vec = np.dot(rot, r_z) # Extract P and SV components trP.data", "+ 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4]", "of charge, to any person obtaining a copy # of this software and", "phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2: tmp2 +=", "38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt =", "= len(h) cf.thickn = h cf.rho = r cf.isoflg = fl cf.a =", "yz): \"\"\" Function to store displacement seismograms into ``obspy`` ``Trace`` obsjects and then", "from `Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical component trR", "\"\"\" Function to generate transfer functions from displacement traces. 
Args: trxyz (obspy.stream): Obspy", "elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv))))", "model variables to Fortran ``conf`` module. Returns: None Variables to pass are ``a``,", "es.quartz() elif typ=='zo': C, rho = es.zoisite() # Rocks elif typ=='BS_f': C, rho", "np.zeros((conf.nlay)) >>> # Pass variables to the `conf` module >>> # Only topmost", "update nt (int): Number of samples dt (float): Sampling rate slow (float): Slowness", "``rhof`` \"\"\" cf_f.dp = cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False):", "telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> C =", "= weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2", "else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into traces stack1", "* Kvrh (float): Voigt-Reuss-Hill average bulk modulus (GPa) * Gvrh (float): Voigt-Reuss-Hill average", "the Voigt notation of the stiffness matrix. Args: cc (np.ndarray): Elastic tensor (shape", "P and S wave velocities from given bulk (K) and shear (G) moduli", "vp (float, optional): P-wave velocity used for rotation vs (float, optional): S-wave velocity", "3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2]", "cf from telewavesim import elast as es from telewavesim.rmat_f import conf as cf_f", "= utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K +", "the x_2, x_3, x_1 axes. 
Note that the sequence of the rotation is", "= np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft =", "np.zeros((3,3,cf.nlay),dtype=complex) cf.Rdi = np.zeros((3,3,cf.nlay),dtype=complex) mins = ['atg', 'bt', 'cpx', 'dol', 'ep', 'grt', 'gln',", "ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer", "\"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j, k, l", "Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt (float): Voigt", "+ 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss = 1./(S[0,0]", "stats from stream str_stats = st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2", "vec[1,:] trH.data = -trT.data/2. return trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\"", "rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in", "Transfer displacement seismograms to an ``obspy`` ``Stream`` object. 
trxyz = get_trxyz(yx, yy, yz)", "from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as cf from telewavesim import", "the Voigt representation of the stiffness matrix to the full 3x3x3x3 tensor representation.", "= [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]", "Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and", "hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1 = tmp1/np.float(len(st1))", "= fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs = np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui =", "``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz", "and associated documentation files (the \"Software\"), to deal # in the Software without", "and populate lists try: open(modfile) except: raise(Exception('model file cannot be opened: ',modfile)) with", "= np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr in st1: tmp1 +=", "function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data", "typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997)", "Greuss)/2. 
return Kvoigt, Gvoigt, Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the", "print() print('Stacking ALL traces in streams') # Copy stats from stream str_stats =", "travel time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0]", "= np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh =", "rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha) +", "<NAME> # This file is part of Telewavesim. # Permission is hereby granted,", "displacement seismograms \"\"\" # Check if all variables are set. If not, throw", "= 1e-3 cc = np.asarray(cc) C = np.zeros((6,6)) for i in range(6): for", "sure the variables are all set before executing the main ``telewavesim.rmat_f.plane_****`` function. Args:", "variables are set. If not, throw an Exception and stop check_cf(obs) # Pass", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C def VRH_average(C): \"\"\"", "analysis is done for an OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component", "t1: Time in seconds Example ------- >>> from telewavesim import conf >>> from", "trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in two", "in kg/m^3 Args: K (float): Bulk modulus (GPa) G (float): Shear modulus (GPa)", "baz (float): Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with updated stats \"\"\"", "in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set. 
Set all of", "Back-azimuth value (degree) Returns: (obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta =", "ani (float): Percent anisotropy Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "C, rho = es.quartz() elif typ=='zo': C, rho = es.zoisite() # Rocks elif", "pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data", "plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] =", "x_2, x_3, x_1 axes. Note that the sequence of the rotation is important:", "tf_from_xyz(trxyz, pvh=False): \"\"\" Function to generate transfer functions from displacement traces. Args: trxyz", "run_plane(obs=False): \"\"\" Function to run the ``plane`` module and return 3-component seismograms as", "Percent anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up matrix elements", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "containing: * trP (obspy.trace): Compressional (P) wave mode * trV (obspy.trace): Vertically polarized", "Stream 1 st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking Returns:", "seismograms \"\"\" # Check if all variables are set. 
If not, throw an", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data =", "hilb2 = hilbert(tr.data) phase2 = np.arctan2(hilb2.imag, hilb2.real) weight2 += np.exp(1j*phase2) # Normalize tmp1", "(np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement", "material Returns: (tuple): Tuple containing: * cc (np.ndarray): Elastic tensor (GPa /density)\\ (shape", "# Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data =", "- C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15.", "= pyfftw.empty_aligned(len(rtr.data), dtype='float') # print(rtr.data, ttr.data) if pvh: vp = np.sqrt(cf.a[2,2,2,2,0])/1.e3 vs =", "conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn", "velocity (km/s) b (float): S-wave velocity (km/s) tr (float): Trend angle of symmetry", "(b*1.e3)*ani/100. 
# Set up matrix elements AA = (a*1.e3 - da/2.)**2 CC =", "averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] +", "# Convert Voigt to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using", "arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 =", "beta, gamma) Args: a (np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)``", "for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 =", "# Rotate to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data, baz) a", "velocity (km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa", "Exception and stop check_cf(obs) # Pass variables to Fortran conf model2for() wave2for() #", "as cf_f from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b): \"\"\" Function", "= set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j] = rho else: print('\\nFlag not defined: use", "(c_ijkl) about three angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic tensor with", "l) cc[i, j, k, l] = C[Voigt_i, Voigt_j] return cc def cc2voigt(cc): \"\"\"", ">>> conf.rho[0] = rho >>> conf.thickn[0] = 10. >>> conf.wvtype = 'P' >>>", "BA). In this case we rotate about x_2 first, x_3 second and x_1", "wave mode * trH (obspy.trace): Horizontally polarized shear (SH) wave mode \"\"\" #", "elif typ=='plag': C, rho = es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif", "set before executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option): Whether or", "= np.zeros((6,6,cf.nlay),dtype=complex) cf.evals = np.zeros((6,cf.nlay),dtype=complex) cf.Tui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Rui = np.zeros((3,3,cf.nlay),dtype=complex) cf.Tdi =", "Passes global OBS-related variables to Fortran ``conf`` module. 
Returns: None Variables to pass", "streams') # Copy stats from stream str_stats = st1[0].stats # Initialize arrays tmp1", "tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft = pyfftw.interfaces.numpy_fft.fft(ttr.data) ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if", "an ``obspy`` ``Stream`` object. Args: ux (np.ndarray): x-component displacement seismogram uy (np.ndarray): y-component", "st2, pws=False): \"\"\" Stacks all traces in two ``Stream`` objects. Args: st1 (obspy.stream):", "[]; fl = []; ani = []; tr = []; pl = []", "elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr =", "= Stream(traces=[tfr, tft]) # Return stream return tfs def update_stats(tr, nt, dt, slow,", "0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] tol =", "Trend angle of symmetry axis (degree) pl (float): Plunge angle of symmetry axis", "parameters cf.nlay = len(h) cf.thickn = h cf.rho = r cf.isoflg = fl", "representation of the stiffness matrix to the full 3x3x3x3 tensor representation. Args: C", "Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2", ">>> conf.wvtype = 'P' >>> slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182", "notice and this permission notice shall be included in all # copies or", "import numpy as np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> #", "= etr.copy() # Rotate to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data, etr.data,", "fl[j] in mins or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] =", "Voigt-Reuss-Hill average Kvrh = (Kvoigt + Kreuss)/2. Gvrh = (Gvoigt + Greuss)/2. 
return", "from telewavesim import conf as cf from telewavesim import elast as es from", "C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else: print('type of", "es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06()", "= asum return aa def rotate_zrt_pvh(trZ, trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces", "Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing: *", "Set all of the following variables through the conf module: 'a', 'rho', 'thickn',", "Function to generate transfer functions from displacement traces. Args: trxyz (obspy.stream): Obspy ``Stream``", "of seismograms for ``land`` case yx, yy, yz = pw_f.plane_land(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) # Transfer", "for j in range(3): for k in range(3): for l in range(3): rr", "tensor (GPa /density) \\ (shape ``(3, 3, 3, 3)``) \"\"\" a = a*1.e3", "range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i, j, k,", "stack2 (obspy.trace): Stacked trace for Stream 2 \"\"\" print() print('Stacking ALL traces in", "alpha (float): Angle in radians beta (float): Angle in radians gam (float): Angle", "``obspy`` ``Stream`` object. .. note:: The ``conf`` global variables need to be set", ">>> # Pass variables to the `conf` module >>> # Only topmost layer", "between all other modules \"\"\" h = []; r = []; a =", "rotate_ne_rt from telewavesim import conf as cf from telewavesim import elast as es", "(6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp,", "0.0013519981570791182 \"\"\" t1 = 0. 
for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso':", "a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0 = b0**2 if cf.wvtype=='P': t1", "import itertools import numpy as np import pyfftw from scipy.signal import hilbert from", "model model with identical material >>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay))", "variables to Fortran ``conf`` module. Returns: None Variables to pass are ``dp``, ``c``,", "store displacement seismograms into ``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream`` object.", "``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx))", "= utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model model with identical material", "48113333333.333336, 61245706544.967415, 28835098086.844658, 68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C) #", "Kreuss, Greuss, Kvrh, Gvrh def mod2vel(K,G,rho): \"\"\" Calculates the isotropic P and S", "velocity (km/s) tr (float): Trend angle of symmetry axis (degree) pl (float): Plunge", "for r in range(3): for s in range(3): asum=0.0 for i in range(3):", "traces tux = Trace(data=ux) tuy = Trace(data=uy) tuz = Trace(data=uz) # Update trace", "slow = 0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. 
for", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh))))", "= utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> utils.VRH_average(C*rho) (75655555555.555557, 48113333333.333336, 61245706544.967415,", "np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha) rot[1,1] = np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha)", "m22 = cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]]) #", ">>> # Only topmost layer is useful for travel time calculation >>> conf.isoflg", "- 2.*LL) # Get tensor with horizontal axis cc = es.tri_tensor(AA, CC, FF,", "+ 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2]", "Elements of rotation matrix m11 = cf.slow*vs*vs/vp m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21", "in stream tfs = Stream(traces=[tfr, tft]) # Return stream return tfs def update_stats(tr,", "b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho = cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho)", "b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2) elif cf.wvtype=='Si' or", "b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h) cf.thickn", "Rotated tensor with shape ``(3, 3, 3, 3)`` .. note:: The three angles", "stiffness matrix. 
Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3, 3)``) Returns:", "2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] -", "if obs: lst = [cf.dp, cf.c, cf.rhof] check = [f is None for", "rho >>> conf.thickn[0] = 10. >>> conf.wvtype = 'P' >>> slow = 0.06", "= []; r = []; a = []; b = []; fl =", "with horizontal axis cc = es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor", "= cc[k,l,m,n] return C def VRH_average(C): \"\"\" Performs a Voigt-Reuss-Hill average of the", "trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data))", "telewavesim import conf >>> from telewavesim import utils >>> import numpy as np", "0.06 # s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. for i in", "(m/s) Example ------- >>> from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0.,", "cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\"", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else:", "this case we rotate about x_2 first, x_3 second and x_1 third. For", "the tensor cc (c_ijkl) about three angles (alpha, beta, gamma) Args: a (np.ndarray):", "ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) # Store in traces", "(GPa) * Gvoigt (float): Voigt average shear modulus (GPa) * Kreuss (float): Reuss", "* trP (obspy.trace): Compressional (P) wave mode * trV (obspy.trace): Vertically polarized shear", "global variables are set and throws an Exception if not. 
Args: obs (bool,", "cf.thickn, cf.isoflg, cf.dt, cf.nt, cf.slow, cf.baz] check = [f is None for f", "rho = es.quartz() elif typ=='zo': C, rho = es.zoisite() # Rocks elif typ=='BS_f':", "tft]) # Return stream return tfs def update_stats(tr, nt, dt, slow, baz): \"\"\"", "coordinate system to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing Radial and", "angle of symmetry axis (degree) type (str, optional): Type of elastic material Returns:", "free of charge, to any person obtaining a copy # of this software", "are all set before executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option):", "model = line.rstrip().split() h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) pl.append(np.float64(model[7])) # Pass", "trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data", "es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C, rho = es.serpentinite_37()", "tr = -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Percent anisotropy da =", "rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor cc (c_ijkl) about three angles", "except: raise(Exception('model file cannot be opened: ',modfile)) with open(modfile) as fileobj: for line", "cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt notation for", "is done for an OBS case or not. 
:raises ExceptionError: Throws ExceptionError if", "k, l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n] return C", "np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model", "scipy.signal import hilbert from obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from", "cf.thickn = h cf.rho = r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs", "68450631050.26149, 38474215710.088997) \"\"\" # Compliance matrix S = np.linalg.inv(C) # Voigt averaging Kvoigt", "= np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z) # Extract P and SV", "the following variables through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt',", "representation to the Voigt notation of the stiffness matrix. Args: cc (np.ndarray): Elastic", "= cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg =", "cf.baz) tuz = update_stats(tuz, cf.nt, cf.dt, cf.slow, cf.baz) # Append to stream trxyz", "elif typ=='bt': C, rho = es.biotite() elif typ=='cpx': C, rho = es.clinopyroxene_92() elif", "+ C[1,1] + C[2,2] - C[0,1] - C[0,2] - C[1,2] + 3.*C[3,3] +", "j: return i return 6-i-j def voigt2cc(C): \"\"\" Convert the Voigt representation of", "import conf as cf_f from telewavesim.rmat_f import plane as pw_f def set_iso_tensor(a, b):", "S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3,", "value (degree) Returns: (obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta = dt", "import sys import itertools import numpy as np import pyfftw from scipy.signal import", "`conf` module >>> # Only topmost layer is useful for travel time calculation", "(float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) Returns: (np.ndarray): cc: Elastic", "Convert Voigt to full tensor cc = voigt2cc(C) return cc def set_tri_tensor(a, b,", "Fortran ``conf`` module. 
Returns: None Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\"", "(shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1), (2, 2), (1,", "np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy();", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3,", "with ``telewavesim`` modules. ''' import sys import itertools import numpy as np import", "tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data) ftfh = pyfftw.interfaces.numpy_fft.fft(trH.data) ftfp = pyfftw.interfaces.numpy_fft.fft(trP.data) if", "\\ axis. Args: tr (float): Trend angle of symmetry axis (degree) pl (float):", "Performs a Voigt-Reuss-Hill average of the anisotropic stifness matrix to the bulk modulus", "conf.thickn[0] = 10. >>> conf.wvtype = 'P' >>> slow = 0.06 # s/km", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "pw_f def set_iso_tensor(a, b): \"\"\" Function to generate tensor for isotropic material. Args:", "= -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs #", "np.exp(1j*phase1) for tr in st2: tmp2 += tr.data hilb2 = hilbert(tr.data) phase2 =", "(degree) pl (float): Plunge angle of symmetry axis (degree) ani (float): Percent anisotropy", "2), (0, 2), (0, 1)] tol = 1e-3 cc = np.asarray(cc) C =", "identical material >>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho =", "an ``obspy`` ``Stream`` object. .. 
note:: The ``conf`` global variables need to be", "gamma) Args: a (np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)`` alpha", "cc elif fl[j] in mins or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j])", "cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftft,ftfz)))) elif cf.wvtype=='Si': tfr.data", "Software, and to permit persons to whom the Software is # furnished to", "+ 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3]", "land or OBS case. if obs: # If OBS, then further pass OBS-related", "conf obs2for() # Get the Fourier transform of seismograms for ``obs``case yx, yy,", "def wave2for(): \"\"\" Passes global wavefield variables to Fortran ``conf`` module. Returns: None", "velocity (m/s) Example ------- >>> from telewavesim import utils >>> cc, rho =", "pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay = len(h) cf.thickn = h cf.rho =", "of Telewavesim. # Permission is hereby granted, free of charge, to any person", "trR, trT, vp=6., vs=3.5): \"\"\" Rotates traces from `Z-R-T` orientation to `P-SV-SH` wave", "i in range(3): for j in range(3): for k in range(3): for l", "variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns: (float): t1: Time in", "open(modfile) as fileobj: for line in fileobj: if not line.rstrip().startswith('#'): model = line.rstrip().split()", "[]; a = []; b = []; fl = []; ani = [];", "Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2])", "ftfz = pyfftw.interfaces.numpy_fft.fft(ztr.data) if cf.wvtype=='P': # Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfr,ftfz)))) tft.data =", "2 \"\"\" print() print('Stacking ALL traces in streams') # Copy stats from stream", "# Check if all variables are set. 
If not, throw an Exception and", "baz = cf.baz # Copy to radial and transverse rtr = ntr.copy() ttr", "3, 3)``) * rho (float): Density (kg/m^3) \"\"\" # Trend and plunge of", "conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>> conf.thickn[0] = 10. >>> conf.wvtype", "Voigt to full tensor cc = voigt2cc(C) return cc def set_tri_tensor(a, b, tr,", "G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K", "rho = es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif typ=='SP_37': C, rho", "(float): Voigt average bulk modulus (GPa) * Gvoigt (float): Voigt average shear modulus", "of the following variables through the conf module: 'dp', 'c', 'rhof'\") def model2for():", "(np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3, 3)`` .. note:: The", "all traces in two ``Stream`` objects. Args: st1 (obspy.stream): Stream 1 st2 (obspy.stream,):", "conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>> conf.thickn[0]", "variables through the conf module: 'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\")", "Set all of the following variables through the conf module: 'dp', 'c', 'rhof'\")", ">>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. 
for i in range(cf.nlay-1): if cf.isoflg[i]", "= es.serpentinite_80() elif typ=='LHZ': C, rho = es.lherzolite() else: print('type of mineral/rock not", "'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "utils.set_aniso_tensor(0., 0., typ='atg') >>> C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>>", "done for an OBS stations Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms", "to pass are ``a``, ``rho``, ``thickn``, ``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a =", "es.zoisite() # Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho", "(float): Plunge angle of symmetry axis (degree) ani (float): Percent anisotropy Returns: (np.ndarray):", "opened: ',modfile)) with open(modfile) as fileobj: for line in fileobj: if not line.rstrip().startswith('#'):", "None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set.", "4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation", "elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif", "pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up", "# Copy traces trP = trZ.copy() trV = trR.copy() trH = trT.copy() #", "= []; fl = []; ani = []; tr = []; pl =", "system to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing Radial and Transverse", "Calculates the isotropic P and S wave velocities from given bulk (K) and", "utils >>> import numpy as np >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg')", "cf.dp cf_f.c = cf.c cf_f.rhof = cf.rhof def run_plane(obs=False): \"\"\" Function to run", "of the symmetry \\ axis. Args: tr (float): Trend angle of symmetry axis", "x_3, x_1 axes. 
Note that the sequence of the rotation is important: (AB", "Voigt average shear modulus (GPa) * Kreuss (float): Reuss average bulk modulus (GPa)", "Tuple containing: * Kvoigt (float): Voigt average bulk modulus (GPa) * Gvoigt (float):", ">>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc >>> conf.rho[0] = rho >>>", "done for an OBS case or not. :raises ExceptionError: Throws ExceptionError if not", "(a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set up matrix elements AA = (a*1.e3 -", "be opened: ',modfile)) with open(modfile) as fileobj: for line in fileobj: if not", "# Extract East, North and Vertical ntr = trxyz[0] etr = trxyz[1] ztr", "# Copy to radial and transverse rtr = ntr.copy() ttr = etr.copy() #", "'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j", "plunge cc = rot_tensor(cc, pl, tr, 0.) # Return tensor return cc def", "cc def cc2voigt(cc): \"\"\" Convert from the full 3x3x3x3 tensor representation to the", "np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix", "P-wave velocity (km/s) b (float): S-wave velocity (km/s) tr (float): Trend angle of", "wave velocities from given bulk (K) and shear (G) moduli and density (rho)", "+ (AA + LL)*(CC + LL)) # eta = FF/(AA - 2.*LL) #", "elif cf.wvtype=='Si' or cf.wvtype=='SV' or cf.wvtype=='SH': t1 += cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1", "if i == j: return i return 6-i-j def voigt2cc(C): \"\"\" Convert the", "'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc = set_iso_tensor(a[j],b[j])", "Stack all traces for tr in st1: tmp1 += tr.data hilb1 = hilbert(tr.data)", "= trend \"\"\" rot = np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1]", "h.append(np.float64(model[0])*1.e3) r.append(np.float64(model[1])) a.append(np.float64(model[2])) b.append(np.float64(model[3])) fl.append(model[4]) ani.append(np.float64(model[5])) tr.append(np.float64(model[6])) 
pl.append(np.float64(model[7])) # Pass configuration parameters cf.nlay", "If not, throw an Exception and stop check_cf(obs) # Pass variables to Fortran", "Reads model parameters from file that are passed through the configuration module ``conf``.", "in the Software without restriction, including without limitation the rights # to use,", "es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif typ=='zo': C, rho = es.zoisite()", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "\"\"\" t1 = 0. for i in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0", "+ np.sqrt((2.*AC)**2 - 2.*AC*(AA + CC + 2.*LL) + (AA + LL)*(CC +", "np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] = np.sin(alpha)*np.cos(beta) rot[1,0] = -np.cos(gam)*np.sin(beta)*np.cos(alpha) - \\ np.sin(gam)*np.sin(alpha)", "pws=False): \"\"\" Stacks all traces in two ``Stream`` objects. Args: st1 (obspy.stream): Stream", "angle of symmetry axis (degree) pl (float): Plunge angle of symmetry axis (degree)", "North and Vertical ntr = trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz", "(np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (tuple): Tuple containing: * Kvoigt (float):", "np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i]", "elast as es from telewavesim.rmat_f import conf as cf_f from telewavesim.rmat_f import plane", "and density (rho) in kg/m^3 Args: K (float): Bulk modulus (GPa) G (float):", "Telewavesim. 
# Permission is hereby granted, free of charge, to any person obtaining", "= es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif typ=='ms': C, rho =", "stream str_stats = st1[0].stats # Initialize arrays tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data))", "= hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for tr in st2:", "3-component displacement seismograms \"\"\" # Check if all variables are set. If not,", "2.*C[1,2])/9. Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1] - C[0,2] -", "P and SV components trP.data = vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2.", "= np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation", "a0 = a0**2 b0 = b0**2 if cf.wvtype=='P': t1 += cf.thickn[i]*np.sqrt(1./a0 - (slow*1.e-3)**2)", "elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C, rho = es.harzburgite() elif", "calc_ttime(slow): \"\"\" Calculates total propagation time through model. The bottom layer is irrelevant", "'a', 'rho', 'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst = [cf.dp,", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) # Store", "Args: tr (float): Trend angle of symmetry axis (degree) pl (float): Plunge angle", "= Trace(data=weight1*tmp1,header=str_stats) stack2 = Trace(data=weight2*tmp2,header=str_stats) return stack1, stack2 def calc_ttime(slow): \"\"\" Calculates total", ">>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>>", "None Variables to pass are ``dt``, ``slow``, ``baz`` \"\"\" cf_f.dt = cf.dt cf_f.slow", "of the stiffness matrix. 
Args: cc (np.ndarray): Elastic tensor (shape ``(3, 3, 3,", "'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx', 'plag', 'qtz', 'zo'] rocks", "'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes global model variables to Fortran ``conf``", "mins or fl[j] in rocks: cc, rho = set_aniso_tensor(tr[j],pl[j],typ=fl[j]) cf.a[:,:,:,:,j] = cc cf.rho[j]", "P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing Radial and Transverse transfer functions", "cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i])", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "rho = es.lherzolite() else: print('type of mineral/rock not implemented') return # Convert Voigt", "tmp1/np.float(len(st1)) tmp2 = tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 =", "= [f is None for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables", "isotropy. The tensor is rotated using the trend and plunge of the symmetry", "obspy.core import Trace, Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "import numpy as np import pyfftw from scipy.signal import hilbert from obspy.core import", "Angle in radians Returns: (np.ndarray): aa: Rotated tensor with shape ``(3, 3, 3,", "This file is part of Telewavesim. 
# Permission is hereby granted, free of", "(shape ``(3, 3, 3, 3)``) \"\"\" C = np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float)", "elif typ=='ol': C, rho = es.olivine() elif typ=='opx': C, rho = es.orthopyroxene() elif", "np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] = np.sin(gam)*np.sin(beta)*np.sin(alpha)", "The above copyright notice and this permission notice shall be included in all", "and/or sell # copies of the Software, and to permit persons to whom", "SOFTWARE. ''' Utility functions to interact with ``telewavesim`` modules. ''' import sys import", ">>> from telewavesim import utils >>> cc, rho = utils.set_aniso_tensor(0., 0., typ='atg') >>>", "through reading of the model file from the function ``utils.read_model(modfile)``, and setting the", "= cf.rho[i] cf_f.thickn[i] = cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\"", "= cf.thickn[i] if cf.isoflg[i]=='iso': cf_f.isoflg[i] = 1 def wave2for(): \"\"\" Passes global wavefield", "in time domain ux = np.real(pyfftw.interfaces.numpy_fft.fft(yx)) uy = np.real(pyfftw.interfaces.numpy_fft.fft(yy)) uz = -np.real(pyfftw.interfaces.numpy_fft.fft(yz)) #", "symmetry axis (e.g., tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\" rot =", "cc >>> conf.rho[0] = rho >>> conf.thickn[0] = 10. 
>>> conf.wvtype = 'P'", "with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow tr.stats.baz = baz", "cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn = np.zeros((nlaymx)) cf_f.isoflg = np.zeros((nlaymx),", "setting the variable ``conf.wvtype`` Args: slow (float): Slowness value (s/km) Returns: (float): t1:", "isotropic P and S wave velocities from given bulk (K) and shear (G)", "return cc def set_tri_tensor(a, b, tr, pl, ani): \"\"\" Function to generate tensor", "(np.ndarray): y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream", "layer is useful for travel time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0]", "trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv = pyfftw.interfaces.numpy_fft.fft(trV.data)", "trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all traces in two ``Stream`` objects.", "cf_f.isoflg = np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] =", "'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80'] for j in range(cf.nlay): if fl[j]=='iso': cc", "This function first checks to make sure the variables are all set before", "return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the tensor cc", "= es.zoisite() # Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C,", "- C[0,2] - C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "= h cf.rho = r cf.isoflg = fl cf.a = np.zeros((3,3,3,3,cf.nlay)) cf.evecs =", "'thickn', 'isoflg', 'dt', 'nt', 'slow', 'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof]", "Exception if not. 
Args: obs (bool, optional): Whether the analysis is done for", "typ=='grt': C, rho = es.garnet() elif typ=='gln': C, rho = es.glaucophane() elif typ=='hbl':", "as an ``obspy`` ``Stream`` object. .. note:: The ``conf`` global variables need to", "'bt', 'cpx', 'dol', 'ep', 'grt', 'gln', 'hbl', 'jade',\\ 'lws', 'lz', 'ms', 'ol', 'opx',", "= np.zeros((3,3)) aa = np.zeros((3,3,3,3)) rot[0,0] = np.cos(alpha)*np.cos(beta) rot[0,1] = np.sin(beta) rot[0,2] =", "vec[0,:] trV.data = vec[1,:] trH.data = -trT.data/2. return trP, trV, trH def stack_all(st1,", "# Set up matrix elements AA = (a*1.e3 - da/2.)**2 CC = (a*1.e3", "Voigt to full tensor cc = voigt2cc(C)*1.e9/rho # Rotate tensor using trend and", "the full 3x3x3x3 tensor representation to the Voigt notation of the stiffness matrix.", "or not the Fortran modules are used obs (bool, optional): Whether or not", "tr (obspy.trace): Trace object to update nt (int): Number of samples dt (float):", "using the trend and plunge of the symmetry axis. Args: a (float): P-wave", "+ CC + 2.*LL) + (AA + LL)*(CC + LL)) # eta =", "and return 3-component seismograms as an ``obspy`` ``Stream`` object. .. note:: The ``conf``", "+ S[2,2] + 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1]", "Reuss averaging Kreuss = 1./(S[0,0] + S[1,1] + S[2,2] + 2.*S[0,1] + 2.*S[0,2]", "Trace(data=uy) tuz = Trace(data=uz) # Update trace header tux = update_stats(tux, cf.nt, cf.dt,", "ttr = etr.copy() # Rotate to radial and transverse rtr.data, ttr.data = rotate_ne_rt(ntr.data,", "= -trT.data/2. 
return trP, trV, trH def stack_all(st1, st2, pws=False): \"\"\" Stacks all", "0., typ='atg') >>> # Define two-layer model model with identical material >>> conf.nlay", "tensor cc (c_ijkl) about three angles (alpha, beta, gamma) Args: a (np.ndarray): Elastic", ">>> C = utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho)", "itertools.product(range(3), range(3), range(3), range(3)): Voigt_i = full_3x3_to_Voigt_6_index(i, j) Voigt_j = full_3x3_to_Voigt_6_index(k, l) cc[i,", "in range(6): k, l = Voigt_notation[i] m, n = Voigt_notation[j] C[i,j] = cc[k,l,m,n]", "= np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\"", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic() elif", "np.asarray(C) cc = np.zeros((3,3,3,3), dtype=float) for i, j, k, l in itertools.product(range(3), range(3),", "np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back into traces", "axis (degree) type (str, optional): Type of elastic material Returns: (tuple): Tuple containing:", "modules \"\"\" h = []; r = []; a = []; b =", "h = []; r = []; a = []; b = []; fl", "Trend and plunge of symmetry axis tr = -tr*np.pi/180. pl = (90. -", "C, rho = es.plagioclase_06() elif typ=='qtz': C, rho = es.quartz() elif typ=='zo': C,", "executing the main ``telewavesim.rmat_f.plane_****`` function. Args: fortran (book, option): Whether or not the", "-(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation", "def calc_ttime(slow): \"\"\" Calculates total propagation time through model. The bottom layer is", "= -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Get tensor with horizontal axis", "wavefield variables to Fortran ``conf`` module. 
Returns: None Variables to pass are ``dt``,", "np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) elif", "Stream from obspy.signal.rotate import rotate_ne_rt from telewavesim import conf as cf from telewavesim", "displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Get", "for f in lst] if sum(check)/len(check)>0.: raise Exception(\"global variables not all set for", "module depending on land or OBS case. if obs: # If OBS, then", "= rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft", "fortran (book, option): Whether or not the Fortran modules are used obs (bool,", "(GPa) * Gvrh (float): Voigt-Reuss-Hill average shear modulus (GPa) Example ------- >>> from", "= tmp2/np.float(len(st2)) # Phase-weighting if pws: weight1 = weight1/np.float(len(st1)) weight2 = weight2/np.float(len(st2)) weight1", "to Fortran ``conf`` module. Returns: None Variables to pass are ``a``, ``rho``, ``thickn``,", "from telewavesim import elast as es from telewavesim.rmat_f import conf as cf_f from", "is typically ensured through reading of the model file from the function ``utils.read_model(modfile)``,", "for isotropic material. Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity", "(90. - pl)*np.pi/180. # Get tensor with horizontal axis # Minerals if typ=='atg':", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "C[1,2] + 3.*C[3,3] + \\ 3.*C[4,4] + 3.*C[5,5])/15. # Reuss averaging Kreuss =", "range(3): for k in range(3): for l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum", "for OBS case. 
Set all of the following variables through the conf module:", "weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data)) weight2 = np.ones(len(st1[0].data)) # Put back", "= []; tr = []; pl = [] # Read file line by", "# Pass variables to Fortran conf model2for() wave2for() # Run the ``plane`` module", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms", "trace for Stream 2 \"\"\" print() print('Stacking ALL traces in streams') # Copy", "(shape ``(3, 3, 3, 3)``) \"\"\" # Trend and plunge of symmetry axis", "l in range(3): rr = rot[m,i]*rot[n,j]*rot[r,k]*rot[s,l] asum = asum + rr*a[i,j,k,l] aa[m,n,r,s] =", "case. if obs: # If OBS, then further pass OBS-related paramters to Fortran", "Rocks elif typ=='BS_f': C, rho = es.blueschist_felsic() elif typ=='BS_m': C, rho = es.blueschist_mafic()", "the Fourier transform of seismograms for ``obs``case yx, yy, yz = pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c'))", "if typ=='atg': C, rho = es.antigorite() elif typ=='bt': C, rho = es.biotite() elif", "rotation Returns: (tuple): tuple containing: * trP (obspy.trace): Compressional (P) wave mode *", "'slow', 'baz'\") if obs: lst = [cf.dp, cf.c, cf.rhof] check = [f is", "= -tr*np.pi/180. pl = (90. - pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100.", "transverse isotropy. 
The tensor is rotated using the trend and plunge of the", "C, rho = es.dolomite() elif typ=='ep': C, rho = es.epidote() elif typ=='grt': C,", "= 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2] - 4.*S[0,1] - 4.*S[0,2] - \\ 4.*S[1,2]", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "st1 (obspy.stream): Stream 1 st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted", "vs = np.sqrt(cf.a[1,2,1,2,0])/1.e3 trP, trV, trH = rotate_zrt_pvh(ztr, rtr, ttr, vp=vp, vs=vs) tfr", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "kg/m^3 Args: K (float): Bulk modulus (GPa) G (float): Shear modulus (GPa) rho", "= 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn =", "- cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow) # Elements of rotation matrix m11", "functions \"\"\" # Extract East, North and Vertical ntr = trxyz[0] etr =", "seismogram Returns: (obspy.stream): trxyz: Stream containing 3-component displacement seismograms \"\"\" # Get displacements", "generate tensor for anisotropic minerals. The \\ tensor is rotated using the trend", "G, rho) (6760.617471753726, 3832.0771334254896) \"\"\" Vp = np.sqrt((K + 4.*G/3.)/rho) Vs = np.sqrt(G/rho)", "dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all traces for tr in st1:", "\"\"\" Calculates the isotropic P and S wave velocities from given bulk (K)", "set for this calculation to succeed. 
This function first checks to make sure", "pvh (bool, optional): Whether to rotate from Z-R-T coordinate system to P-SV-SH wave", "trR.copy() trH = trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs", "np.cos(gam)*np.cos(beta) rot[1,2] = -np.cos(gam)*np.sin(beta)*np.sin(alpha) + \\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha)", "rho = es.lawsonite() elif typ=='lz': C, rho = es.lizardite() elif typ=='ms': C, rho", "0.) # Return tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to", "3, 3, 3)`` alpha (float): Angle in radians beta (float): Angle in radians", "Pass configuration parameters cf.nlay = len(h) cf.thickn = h cf.rho = r cf.isoflg", "+= cf.thickn[i]*np.sqrt(1./b0 - (slow*1.e-3)**2) return t1 def read_model(modfile): \"\"\" Reads model parameters from", "\"\"\" Voigt_notation = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2),", "= np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data", "= pw_f.plane_obs(cf.nt,cf.nlay,np.array(cf.wvtype, dtype='c')) else: # Get the Fourier transform of seismograms for ``land``", "np.dot(rot, r_z) # Extract P and SV components trP.data = vec[0,:] trV.data =", "to store displacement seismograms into ``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream``", "3x3x3x3 tensor representation. Args: C (np.ndarray): Stiffness matrix (shape ``(6, 6)``) Returns: (np.ndarray):", "vec = np.dot(rot, r_z) # Extract P and SV components trP.data = vec[0,:]", "def obs2for(): \"\"\" Passes global OBS-related variables to Fortran ``conf`` module. 
Returns: None", "= cf.rho[i] K1,G1,K2,G2,K,G = VRH_average(cc*rho) a0, b0 = mod2vel(K,G,rho) a0 = a0**2 b0", "2.*LL) + (AA + LL)*(CC + LL)) # eta = FF/(AA - 2.*LL)", "useful for travel time calculation >>> conf.isoflg = ['atg'] >>> conf.a[:,:,:,:,0] = cc", "- 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix", "Z-R-T coordinate system to P-SV-SH wave mode Returns: (obspy.stream): tfs: Stream containing Radial", "of the Software. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "obsjects and then an ``obspy`` ``Stream`` object. Args: ux (np.ndarray): x-component displacement seismogram", "permit persons to whom the Software is # furnished to do so, subject", "# Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs - cf.slow*cf.slow)", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. '''", "function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfv)))) tft.data", "``conf`` module. Returns: None Variables to pass are ``dp``, ``c``, ``rhof`` \"\"\" cf_f.dp", "Append to stream trxyz = Stream(traces=[tux, tuy, tuz]) return trxyz def tf_from_xyz(trxyz, pvh=False):", "other modules \"\"\" h = []; r = []; a = []; b", "= cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]]) # Vector", "Checks whether or not all required global variables are set and throws an", "Enables Phase-Weighted Stacking Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked trace for", "polarized shear (SV) wave mode * trH (obspy.trace): Horizontally polarized shear (SH) wave", "= (90. - pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. 
db = (b*1.e3)*ani/100.", "``(3, 3, 3, 3)`` alpha (float): Angle in radians beta (float): Angle in", "are passed through the configuration module ``conf``. Returns: None: Parameters are now global", "a (np.ndarray): Elastic tensor with shape ``(3, 3, 3, 3)`` alpha (float): Angle", "``isoflg`` \"\"\" nlaymx = cf_f.nlaymx cf_f.a = np.zeros((3,3,3,3,nlaymx)) cf_f.rho = np.zeros((nlaymx)) cf_f.thickn =", "[]; pl = [] # Read file line by line and populate lists", "utils.set_aniso_tensor(0., 0., typ='atg') >>> # Define two-layer model model with identical material >>>", "(obspy.trace): Vertical component trR (obspy.trace): Radial component trT (obspy.trace): Transverse component vp (float,", "and Vertical ntr = trxyz[0] etr = trxyz[1] ztr = trxyz[2] baz =", "\"\"\" # Trend and plunge of symmetry axis tr = -tr*np.pi/180. pl =", "average shear modulus (GPa) Example ------- >>> from telewavesim import utils >>> cc,", "= np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), dtype=complex) weight2 = np.zeros(len(st2[0].data), dtype=complex) # Stack all", "else: print('type of mineral/rock not implemented') return # Convert Voigt to full tensor", "2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs # Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]])", "system pvh (bool, optional): Whether to rotate from Z-R-T coordinate system to P-SV-SH", "C: Stiffness matrix (shape ``(6, 6)``) \"\"\" Voigt_notation = [(0, 0), (1, 1),", "trxyz (obspy.stream): Obspy ``Stream`` object in cartesian coordinate system pvh (bool, optional): Whether", "= np.asarray(cc) C = np.zeros((6,6)) for i in range(6): for j in range(6):", "# Transfer function tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfv,ftfp)))) tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(ftfh,ftfp)))) elif cf.wvtype=='Si': tfr.data =", "aa: Rotated tensor with shape ``(3, 3, 3, 3)`` .. 
note:: The three", "tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft)))) elif cf.wvtype=='SV': tfr.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftfr)))) elif cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfz,ftft))))", "cf.wvtype=='SH': tft.data = np.fft.fftshift(np.real(pyfftw.interfaces.numpy_fft.ifft(np.divide(-ftfp,ftfh)))) else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft =", "\"\"\" Updates the ``stats`` doctionary from an obspy ``Trace`` object. Args: tr (obspy.trace):", "into ``obspy`` ``Trace`` obsjects and then an ``obspy`` ``Stream`` object. Args: ux (np.ndarray):", "4.*S[0,2] - \\ 4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average", "(float): Plunge angle of symmetry axis (degree) type (str, optional): Type of elastic", "(degree) Returns: (obspy.trace): tr: Trace with updated stats \"\"\" tr.stats.delta = dt tr.stats.slow", "= np.zeros((3,3,3,3), dtype=float) for i, j, k, l in itertools.product(range(3), range(3), range(3), range(3)):", "case we rotate about x_2 first, x_3 second and x_1 third. For trend", "updated stats \"\"\" tr.stats.delta = dt tr.stats.slow = slow tr.stats.baz = baz return", "traces from `Z-R-T` orientation to `P-SV-SH` wave mode. Args: trZ (obspy.trace): Vertical component", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "LL)) # eta = FF/(AA - 2.*LL) # Get tensor with horizontal axis", "* Kvoigt (float): Voigt average bulk modulus (GPa) * Gvoigt (float): Voigt average", "'plag', 'qtz', 'zo'] rocks = ['BS_f', 'BS_m', 'EC_f', 'EC_m', 'HB', 'LHZ', 'SP_37', 'SP_80']", "3, 3, 3)`` .. 
note:: The three angles (``alpha``, ``beta``, ``gam``) correspond to", "tri_tensor): ``alpha`` = plunge ``beta`` = trend \"\"\" rot = np.zeros((3,3)) aa =", "else: tfr = rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data))", "st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 +=", "of the rotation is important: (AB ~= BA). In this case we rotate", "print(mins,rocks) print() raise(Exception()) return def check_cf(obs=False): \"\"\" Checks whether or not all required", "np.zeros((nlaymx), dtype='int') for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i]", "4.*S[1,2] + 3.*S[3,3] + 3.*S[4,4] + 3.*S[5,5]) # Voigt-Reuss-Hill average Kvrh = (Kvoigt", "axis cc = es.tri_tensor(AA, CC, FF, LL, NN) # Rotate tensor using trend", "tr in st1: tmp1 += tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real)", "= rho >>> conf.thickn[0] = 10. >>> conf.wvtype = 'P' >>> slow =", "trT.copy() # Vertical slownesses qp = np.sqrt(1/vp/vp - cf.slow*cf.slow) qs = np.sqrt(1/vs/vs -", "rho = es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif typ=='ol': C, rho", "[]; r = []; a = []; b = []; fl = [];", "conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> # Pass variables to the", "1 st2 (obspy.stream,): Stream 2 pws (bool, optional): Enables Phase-Weighted Stacking Returns: (tuple):", "rotated using the trend and plunge of the symmetry axis. Args: a (float):", "es.lizardite() elif typ=='ms': C, rho = es.muscovite() elif typ=='ol': C, rho = es.olivine()", "and plunge of the symmetry axis. 
Args: a (float): P-wave velocity (km/s) b", "with identical material >>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho", "rot = np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial and Vertical r_z", "Fortran conf obs2for() # Get the Fourier transform of seismograms for ``obs``case yx,", "material. Args: a (float): P-wave velocity (km/s) b (float): S-wave velocity (km/s) Returns:", "m12 = -(1 - 2*vs*vs*cf.slow*cf.slow)/(2*vp*qp) m21 = (1 - 2*vs*vs*cf.slow*cf.slow)/(2*vs*qs) m22 = cf.slow*vs", "= 1 def wave2for(): \"\"\" Passes global wavefield variables to Fortran ``conf`` module.", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "utils.cc2voigt(cc) >>> K, G = utils.VRH_average(C*rho)[4:6] >>> utils.mod2vel(K, G, rho) (6760.617471753726, 3832.0771334254896) \"\"\"", "cc cf.rho[j] = rho else: print('\\nFlag not defined: use either \"iso\", \"tri\" or", "+ C[1,1] + C[2,2] + 2.*C[0,1] + 2.*C[0,2] + 2.*C[1,2])/9. Gvoigt = (C[0,0]", "\\ np.sin(gam)*np.cos(alpha) rot[2,0] = np.sin(gam)*np.sin(beta)*np.cos(alpha) - \\ np.cos(gam)*np.sin(alpha) rot[2,1] = -np.sin(gam)*np.cos(beta) rot[2,2] =", "tensor return cc def set_aniso_tensor(tr, pl, typ='atg'): \"\"\" Function to generate tensor for", "axis. Args: tr (float): Trend angle of symmetry axis (degree) pl (float): Plunge", "# s/km >>> utils.calc_ttime(slow) 0.0013519981570791182 \"\"\" t1 = 0. for i in range(cf.nlay-1):", "rtr.copy(); tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data)", "2.*C[0,2] + 2.*C[1,2])/9. 
Gvoigt = (C[0,0] + C[1,1] + C[2,2] - C[0,1] -", "Transverse component vp (float, optional): P-wave velocity used for rotation vs (float, optional):", "material >>> conf.nlay = 2 >>> conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay))", "to make sure the variables are all set before executing the main ``telewavesim.rmat_f.plane_****``", "USE OR OTHER DEALINGS IN THE # SOFTWARE. ''' Utility functions to interact", "so, subject to the following conditions: # The above copyright notice and this", "= es.serpentinite_37() elif typ=='SP_80': C, rho = es.serpentinite_80() elif typ=='LHZ': C, rho =", "a rotation of the tensor cc (c_ijkl) about three angles (alpha, beta, gamma)", "Returns: (tuple): tuple containing: * stack1 (obspy.trace): Stacked trace for Stream 1 *", "- pl)*np.pi/180. # Percent anisotropy da = (a*1.e3)*ani/100. db = (b*1.e3)*ani/100. # Set", "= np.sqrt(G/rho) return Vp, Vs def rot_tensor(a,alpha,beta,gam): \"\"\" Performs a rotation of the", "radians gam (float): Angle in radians Returns: (np.ndarray): aa: Rotated tensor with shape", "typically ensured through reading of the model file from the function ``utils.read_model(modfile)``, and", "+ 2.*S[0,1] + 2.*S[0,2] + 2.*S[1,2]) Greuss = 15./(4.*S[0,0] + 4.*S[1,1] + 4.*S[2,2]", "Updates the ``stats`` doctionary from an obspy ``Trace`` object. Args: tr (obspy.trace): Trace", "C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho = es.eclogite_massive() elif typ=='HB': C,", "wave2for(): \"\"\" Passes global wavefield variables to Fortran ``conf`` module. Returns: None Variables", "tfr = trV.copy(); tfr.data = np.zeros(len(tfr.data)) tft = trH.copy(); tft.data = np.zeros(len(tft.data)) ftfv", "the ``stats`` doctionary from an obspy ``Trace`` object. Args: tr (obspy.trace): Trace object", "containing 3-component displacement seismograms \"\"\" # Check if all variables are set. 
If", "coordinate system pvh (bool, optional): Whether to rotate from Z-R-T coordinate system to", "elif typ=='opx': C, rho = es.orthopyroxene() elif typ=='plag': C, rho = es.plagioclase_06() elif", "rho = es.dolomite() elif typ=='ep': C, rho = es.epidote() elif typ=='grt': C, rho", "a*1.e3 b = b*1.e3 C = es.iso_tensor(a, b) # Convert Voigt to full", "def set_iso_tensor(a, b): \"\"\" Function to generate tensor for isotropic material. Args: a", "trZ (obspy.trace): Vertical component trR (obspy.trace): Radial component trT (obspy.trace): Transverse component vp", "Rotation matrix rot = np.array([[-m11, m12], [-m21, m22]]) # Vector of Radial and", "+= tr.data hilb1 = hilbert(tr.data) phase1 = np.arctan2(hilb1.imag, hilb1.real) weight1 += np.exp(1j*phase1) for", "conf.a = np.zeros((3,3,3,3,conf.nlay)) >>> conf.rho = np.zeros((conf.nlay)) >>> conf.thickn = np.zeros((conf.nlay)) >>> #", "weight2 = weight2/np.float(len(st2)) weight1 = np.real(abs(weight1)) weight2 = np.real(abs(weight2)) else: weight1 = np.ones(len(st1[0].data))", "(np.ndarray): Elastic tensor (GPa /density)\\ (shape ``(3, 3, 3, 3)``) * rho (float):", "db/2.)**2 NN = (b*1.e3 - db/2.)**2 AC = (a*1.e3)**2 FF = -LL +", "Returns: (np.ndarray): cc: Elastic tensor (shape ``(3, 3, 3, 3)``) \"\"\" C =", "b (float): S-wave velocity (km/s) tr (float): Trend angle of symmetry axis (degree)", "shear (SV) wave mode * trH (obspy.trace): Horizontally polarized shear (SH) wave mode", "# Copy stats from stream str_stats = st1[0].stats # Initialize arrays tmp1 =", "in range(cf.nlay-1): if cf.isoflg[i] == 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else:", "tfr.data = np.zeros(len(tfr.data)) tft = ttr.copy(); tft.data = np.zeros(len(tft.data)) ftfr = pyfftw.interfaces.numpy_fft.fft(rtr.data) ftft", "rho = es.blueschist_mafic() elif typ=='EC_f': C, rho = es.eclogite_foliated() elif typ=='EC_m': C, rho", "tmp1 = np.zeros(len(st1[0].data)) tmp2 = np.zeros(len(st2[0].data)) weight1 = np.zeros(len(st1[0].data), 
dtype=complex) weight2 = np.zeros(len(st2[0].data),", "to the `conf` module >>> # Only topmost layer is useful for travel", "for i in range(cf.nlay): cf_f.a[:,:,:,:,i] = cf.a[:,:,:,:,i] cf_f.rho[i] = cf.rho[i] cf_f.thickn[i] = cf.thickn[i]", "Plunge angle of symmetry axis (degree) type (str, optional): Type of elastic material", "2019 <NAME> # This file is part of Telewavesim. # Permission is hereby", "density (rho) in kg/m^3 Args: K (float): Bulk modulus (GPa) G (float): Shear", "hereby granted, free of charge, to any person obtaining a copy # of", "Convert the Voigt representation of the stiffness matrix to the full 3x3x3x3 tensor", "y-component displacement seismogram uz (np.ndarray): z-component displacement seismogram Returns: (obspy.stream): trxyz: Stream containing", "the rotation is important: (AB ~= BA). In this case we rotate about", "== 'iso': a0 = cf.a[2,2,2,2,i] b0 = cf.a[1,2,1,2,i] else: cc = cc2voigt(cf.a[:,:,:,:,i]) rho", "tensor return cc, rho def full_3x3_to_Voigt_6_index(i, j): \"\"\" Conversion of tensor to Voigt", "cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global OBS-related variables", "DEALINGS IN THE # SOFTWARE. ''' Utility functions to interact with ``telewavesim`` modules.", "to generate tensor for transverse isotropy. The tensor is rotated using the trend", "are set. If not, throw an Exception and stop check_cf(obs) # Pass variables", "(km/s) Returns: (np.ndarray): cc: Elastic tensor (GPa /density) \\ (shape ``(3, 3, 3,", "are set and throws an Exception if not. Args: obs (bool, optional): Whether", "3, 3, 3)``) \"\"\" # Trend and plunge of symmetry axis tr =", "3-component seismograms as an ``obspy`` ``Stream`` object. .. 
note:: The ``conf`` global variables", "of Radial and Vertical r_z = np.array([trR.data,trZ.data]) # Rotation vec = np.dot(rot, r_z)", "dtype='c')) else: # Get the Fourier transform of seismograms for ``land`` case yx,", "# Pass variables to the `conf` module >>> # Only topmost layer is", "''' Utility functions to interact with ``telewavesim`` modules. ''' import sys import itertools", "need to be set for this calculation to succeed. This function first checks", "the conf module: 'dp', 'c', 'rhof'\") def model2for(): \"\"\" Passes global model variables", "= cf.dt cf_f.slow = cf.slow cf_f.baz = cf.baz def obs2for(): \"\"\" Passes global", "do so, subject to the following conditions: # The above copyright notice and" ]
[ "Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug =", "= models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True, blank=True)", "Comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user)", "models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return", "models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar)", "BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return", "= \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User)", "\"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars')", "models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name", "default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\"", "default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\"", "models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField()", "__unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True, blank=True) def __unicode__(self): return", 
"class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug", "class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self):", "= models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class", "models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name", "bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment", "models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0)", "models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural =", "BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return", "import models from django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200) description =", "models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural =", "user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment", "__unicode__(self): return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model):", "class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField()", "from django.db import models from 
django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200)", "\"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers", "Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description", "= models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self):", "django.db import models from django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200) description", "\"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def", "bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating", "= models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating =", "__unicode__(self): return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model):", "bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user", "= models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar =", "class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='') def", "= models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user =", "def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar = 
models.ForeignKey(Bar) user", "models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment = models.TextField()", "user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar", "models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User,", "verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar =", "def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField() beer", "name = models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return", "= models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta:", "models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model):", "comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user)", "description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self):", "verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user =", "name = models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True,", "beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def 
__unicode__(self): return self.name class", "related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name =", "class BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self):", "class BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self):", "verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers =", "verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description =", "self.name class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating =", "= models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name", "unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user", "BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User)", "= \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField()", "return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User)", "User class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='')", "models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model):", "comment = 
models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user)", "import User class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True,", "self.name class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name =", "= models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='')", "__unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar) user =", "return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True, blank=True) def __unicode__(self): return self.user.username", "user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar", "= models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar =", "from django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug", "return unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User)", "= models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class", "<filename>pdx_beer_finder/beer_googles/models.py from django.db import models from django.contrib.auth.models import User class Beer(models.Model): name =", "= \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer,", "def __unicode__(self): return unicode(self.user) class 
BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user", "BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return", "def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True, blank=True) def __unicode__(self):", "= models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment =", "= models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def __unicode__(self): return", "def __unicode__(self): return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class", "models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name =", "django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug =", "= models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment =", "models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar)", "models.CharField(max_length=200) description = models.TextField() beers = models.ManyToManyField(Beer, related_name='bars') slug = models.SlugField(unique=True, default='') def", "models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model):", "rating = models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def", "models.ForeignKey(Bar) user = models.ForeignKey(User) def 
__unicode__(self): return unicode(self.user) class BeerComment(models.Model): Comment = models.TextField()", "unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def", "models from django.contrib.auth.models import User class Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField()", "class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name = models.CharField(max_length=200)", "class BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer) user =", "slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Beer\"", "models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class", "\"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar) user", "return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class Bar(models.Model): name", "= models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment =", "unicode(self.user) class BeerComment(models.Model): Comment = models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def", "description = models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta:", "return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating", "Meta: verbose_name = \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar", "return unicode(self.rating) class BeerRating(models.Model): rating = 
models.IntegerField(default=0) comment = models.TextField() beer = models.ForeignKey(Beer)", "= models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating) class", "Beer(models.Model): name = models.CharField(max_length=200) description = models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self):", "def __unicode__(self): return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural = \"Beers\" class", "models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True, blank=True) def", "__unicode__(self): return unicode(self.rating) class BeerRating(models.Model): rating = models.IntegerField(default=0) comment = models.TextField() beer =", "= models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Bar\" verbose_name_plural", "= \"Bar\" verbose_name_plural = \"Bars\" class BarRating(models.Model): rating = models.IntegerField() bar = models.ForeignKey(Bar)", "beer = models.ForeignKey(Beer) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class BarComment(models.Model): comment", "__unicode__(self): return unicode(self.user) class BarComment(models.Model): comment = models.TextField() bar = models.ForeignKey(Bar) user =", "slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Bar\"", "user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class UserProfile(models.Model): user = models.ForeignKey(User, null=True,", "rating = models.IntegerField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.rating)", "models.TextField() bar = models.ForeignKey(Bar) user = models.ForeignKey(User) def __unicode__(self): return unicode(self.user) class 
BeerComment(models.Model):", "= models.TextField() slug = models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name", "= models.SlugField(unique=True, default='') def __unicode__(self): return self.name class Meta: verbose_name = \"Beer\" verbose_name_plural" ]
[ "import platform import socket serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0',", "import Serial import time import platform import socket serialPort = Serial('COM3' if platform.system()", "= Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET,", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address) = server.accept() print('Connected')", "serial import Serial import time import platform import socket serialPort = Serial('COM3' if", "time import platform import socket serialPort = Serial('COM3' if platform.system() == 'Windows' else", "'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while", "time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address) =", "if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('',", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address) = server.accept() print('Connected') while", "serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server =", "server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address) = server.accept()", "True: (client, address) = server.accept() print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE'", "2222)) server.listen(1) while True: (client, address) = server.accept() print('Connected') while True: data =", "= server.accept() print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE' in data: break", "while True: (client, address) = 
server.accept() print('Connected') while True: data = client.recv(6)#.decode() if", "from serial import Serial import time import platform import socket serialPort = Serial('COM3'", "'/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client,", "server.bind(('', 2222)) server.listen(1) while True: (client, address) = server.accept() print('Connected') while True: data", "import time import platform import socket serialPort = Serial('COM3' if platform.system() == 'Windows'", "socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address) = server.accept() print('Connected') while True:", "socket serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server", "Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "server.listen(1) while True: (client, address) = server.accept() print('Connected') while True: data = client.recv(6)#.decode()", "== 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1)", "import socket serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2)", "print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE' in data: break #print(data) serialPort.write(data)", "address) = server.accept() print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE' in data:", "else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True:", "(client, address) = server.accept() print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE' in", "server.accept() print('Connected') while True: data = client.recv(6)#.decode() if 'CLOSE' 
in data: break #print(data)", "platform.system() == 'Windows' else '/dev/ttyUSB0', 9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222))", "platform import socket serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600)", "9600) time.sleep(2) server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server.bind(('', 2222)) server.listen(1) while True: (client, address)", "Serial import time import platform import socket serialPort = Serial('COM3' if platform.system() ==" ]
[ "app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username =", "Time : 2019/7/27 16:15 Author : Hansybx \"\"\" from flask import request, jsonify", "Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password", "code = 401 msg = '查询失败' info = { 'result': '账号或密码错误' } except", "semester, academy, zc) code = 200 msg = '查询成功' info = result except", "result = get_teacher_class(username, password, semester, academy, zc) code = 200 msg = '查询成功'", "= request.form['zc'] try: result = get_teacher_class(username, password, semester, academy, zc) code = 200", "academy = request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username, password, semester, academy,", "coding: utf-8 -*- Time : 2019/7/27 16:15 Author : Hansybx \"\"\" from flask", "16:15 Author : Hansybx \"\"\" from flask import request, jsonify from app.api.v1.stu import", "from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password =", "= get_teacher_class(username, password, semester, academy, zc) code = 200 msg = '查询成功' info", "= 500 msg = '查询失败' info = [ { 'result': '未知异常' } ]", "'账号或密码错误' } except Exception: code = 500 msg = '查询失败' info = [", "import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username']", "flask import request, jsonify from app.api.v1.stu import stu from app.models.error import PasswordFailed from", "request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username, password, semester, academy, zc) code", "Author : Hansybx \"\"\" 
from flask import request, jsonify from app.api.v1.stu import stu", "teacher_class_get(): username = request.form['username'] password = request.form['password'] semester = request.form['semester'] academy = request.form['academy']", "academy, zc) code = 200 msg = '查询成功' info = result except PasswordFailed:", "PasswordFailed: code = 401 msg = '查询失败' info = { 'result': '账号或密码错误' }", "Hansybx \"\"\" from flask import request, jsonify from app.api.v1.stu import stu from app.models.error", "request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username, password, semester,", "<filename>app/api/v1/stu/teacher_class.py \"\"\" -*- coding: utf-8 -*- Time : 2019/7/27 16:15 Author : Hansybx", "utf-8 -*- Time : 2019/7/27 16:15 Author : Hansybx \"\"\" from flask import", "= request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try: result", "-*- Time : 2019/7/27 16:15 Author : Hansybx \"\"\" from flask import request,", "= request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username, password, semester, academy, zc)", "{ 'result': '账号或密码错误' } except Exception: code = 500 msg = '查询失败' info", "msg = '查询失败' info = [ { 'result': '未知异常' } ] res_json =", "info = { 'result': '账号或密码错误' } except Exception: code = 500 msg =", "'查询失败' info = { 'result': '账号或密码错误' } except Exception: code = 500 msg", "= result except PasswordFailed: code = 401 msg = '查询失败' info = {", "= 200 msg = '查询成功' info = result except PasswordFailed: code = 401", "request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try: result =", "info = [ { 'result': '未知异常' } ] res_json = Res(code, msg, info)", "msg = '查询失败' info = { 'result': '账号或密码错误' } except Exception: code =", "msg = '查询成功' info = result except PasswordFailed: code = 401 msg =", "\"\"\" from flask import request, jsonify from app.api.v1.stu 
import stu from app.models.error import", "'result': '账号或密码错误' } except Exception: code = 500 msg = '查询失败' info =", "Exception: code = 500 msg = '查询失败' info = [ { 'result': '未知异常'", "except Exception: code = 500 msg = '查询失败' info = [ { 'result':", "[ { 'result': '未知异常' } ] res_json = Res(code, msg, info) return jsonify(res_json.__dict__)", "info = result except PasswordFailed: code = 401 msg = '查询失败' info =", "} except Exception: code = 500 msg = '查询失败' info = [ {", "PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get():", ": Hansybx \"\"\" from flask import request, jsonify from app.api.v1.stu import stu from", "401 msg = '查询失败' info = { 'result': '账号或密码错误' } except Exception: code", "\"\"\" -*- coding: utf-8 -*- Time : 2019/7/27 16:15 Author : Hansybx \"\"\"", "'查询成功' info = result except PasswordFailed: code = 401 msg = '查询失败' info", "stu from app.models.error import PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class", "zc) code = 200 msg = '查询成功' info = result except PasswordFailed: code", "password = request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try:", "@stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password = request.form['password'] semester = request.form['semester']", "= request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username, password,", "except PasswordFailed: code = 401 msg = '查询失败' info = { 'result': '账号或密码错误'", "from flask import request, jsonify from app.api.v1.stu import stu from app.models.error import PasswordFailed", "= { 'result': '账号或密码错误' } except Exception: code = 500 msg = '查询失败'", "200 msg = '查询成功' info = result except PasswordFailed: code 
= 401 msg", "import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password = request.form['password'] semester", "from app.api.v1.stu import stu from app.models.error import PasswordFailed from app.models.res import Res from", "code = 200 msg = '查询成功' info = result except PasswordFailed: code =", "from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username", "request, jsonify from app.api.v1.stu import stu from app.models.error import PasswordFailed from app.models.res import", "code = 500 msg = '查询失败' info = [ { 'result': '未知异常' }", "zc = request.form['zc'] try: result = get_teacher_class(username, password, semester, academy, zc) code =", "methods=['POST']) def teacher_class_get(): username = request.form['username'] password = request.form['password'] semester = request.form['semester'] academy", "app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password = request.form['password']", "= [ { 'result': '未知异常' } ] res_json = Res(code, msg, info) return", "import stu from app.models.error import PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import", "= '查询失败' info = [ { 'result': '未知异常' } ] res_json = Res(code,", "= 401 msg = '查询失败' info = { 'result': '账号或密码错误' } except Exception:", "2019/7/27 16:15 Author : Hansybx \"\"\" from flask import request, jsonify from app.api.v1.stu", "import PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST']) def", "import request, jsonify from app.api.v1.stu import stu from app.models.error import PasswordFailed from app.models.res", 
"app.models.error import PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher', methods=['POST'])", "= '查询成功' info = result except PasswordFailed: code = 401 msg = '查询失败'", "-*- coding: utf-8 -*- Time : 2019/7/27 16:15 Author : Hansybx \"\"\" from", "result except PasswordFailed: code = 401 msg = '查询失败' info = { 'result':", "500 msg = '查询失败' info = [ { 'result': '未知异常' } ] res_json", "= request.form['username'] password = request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc =", "request.form['zc'] try: result = get_teacher_class(username, password, semester, academy, zc) code = 200 msg", "try: result = get_teacher_class(username, password, semester, academy, zc) code = 200 msg =", ": 2019/7/27 16:15 Author : Hansybx \"\"\" from flask import request, jsonify from", "get_teacher_class @stu.route('/class/teacher', methods=['POST']) def teacher_class_get(): username = request.form['username'] password = request.form['password'] semester =", "def teacher_class_get(): username = request.form['username'] password = request.form['password'] semester = request.form['semester'] academy =", "jsonify from app.api.v1.stu import stu from app.models.error import PasswordFailed from app.models.res import Res", "get_teacher_class(username, password, semester, academy, zc) code = 200 msg = '查询成功' info =", "request.form['username'] password = request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc = request.form['zc']", "'查询失败' info = [ { 'result': '未知异常' } ] res_json = Res(code, msg,", "password, semester, academy, zc) code = 200 msg = '查询成功' info = result", "app.api.v1.stu import stu from app.models.error import PasswordFailed from app.models.res import Res from app.utils.teacher_class.teacher_class_utils", "from app.models.error import PasswordFailed from app.models.res import Res from 
app.utils.teacher_class.teacher_class_utils import get_teacher_class @stu.route('/class/teacher',", "= '查询失败' info = { 'result': '账号或密码错误' } except Exception: code = 500", "username = request.form['username'] password = request.form['password'] semester = request.form['semester'] academy = request.form['academy'] zc", "semester = request.form['semester'] academy = request.form['academy'] zc = request.form['zc'] try: result = get_teacher_class(username," ]
[ "import base64 from jinja2 import Template import sys import os import urllib.request def", "def encodebase64(filename): fin = open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close()", "if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") save_file(html,basename)", "response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\": render_template() # gets tensorflow for", "= fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,'", "encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file", "prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file", "src_file = \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file,", "def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as", "html = response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\": render_template() # gets", "get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename):", "def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if", "return prefix + 
encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename)", "'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\"", "urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\":", "open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url)", "open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename):", "save_file(html,basename) return html if __name__ == \"__main__\": render_template() # gets tensorflow for s3", "save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1]", "data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix", "basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response: html", "prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template =", "return prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template", "def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else:", "else: with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") 
save_file(html,basename) return html if __name__", "= url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response: html =", "= base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix +", "os import urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents = fin.read() data_url", "open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename)", "= response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\": render_template() # gets tensorflow", "contents = fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix =", "template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file)", "template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def", "Template import sys import os import urllib.request def encodebase64(filename): fin = open(filename, 'rb')", "response: html = response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\": render_template() #", "= template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) 
save_file(result,dest_file) def get_local_file(filename): return open(filename).read()", "from jinja2 import Template import sys import os import urllib.request def encodebase64(filename): fin", "dest_file = \"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type,", "= 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file =", "'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix +", "encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename", "'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix", "save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with", "html if __name__ == \"__main__\": render_template() # gets tensorflow for s3 upload get_remote_file(\"https://cdnjs.cloudflare.com/ajax/libs/tensorflow/0.14.1/tf.min.js\")", "make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type)", "os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") save_file(html,basename) return", "get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): 
basename = url.split(\"/\")[-1] if os.path.exists(basename):", "data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix", "make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file = \"template.tmpl\"", "\"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png,", "with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ ==", "+ encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template():", "def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def render_template(): src_file =", "+ encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file))", "= \"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64,", "urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents)", "= open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def", "import os import urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents = fin.read()", "return get_local_file(basename) else: with urllib.request.urlopen(url) as response: 
html = response.read().decode(\"utf-8\") save_file(html,basename) return html", ") save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename =", "result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return", "fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return", "make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def", "make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url):", "encodebase64(filename) def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result", "= \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file,", "= Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def", "as response: html = response.read().decode(\"utf-8\") save_file(html,basename) return html if __name__ == \"__main__\": render_template()", "return 
open(filename).read() def save_file(data,filename): open(filename,'w').write(data) def get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return", "encodebase64(filename): fin = open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close() return", "= 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix", "fin.close() return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def", "import urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents = fin.read() data_url =", "def render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result =", "base64 from jinja2 import Template import sys import os import urllib.request def encodebase64(filename):", "jinja2 import Template import sys import os import urllib.request def encodebase64(filename): fin =", "render_template(): src_file = \"template.tmpl\" dest_file = \"index.html\" template = Template(get_local_file(src_file)) result = template.render(", "\"index.html\" template = Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, )", "sys import os import urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents =", "return data_url.decode(\"utf-8\") def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type):", "import sys import os import urllib.request def encodebase64(filename): fin = open(filename, 'rb') contents", "base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\") def 
make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename)", "<reponame>weepingwillowben/web-script-wars<gh_stars>1-10 import base64 from jinja2 import Template import sys import os import urllib.request", "return html if __name__ == \"__main__\": render_template() # gets tensorflow for s3 upload", "fin = open(filename, 'rb') contents = fin.read() data_url = base64.b64encode(contents) fin.close() return data_url.decode(\"utf-8\")", "Template(get_local_file(src_file)) result = template.render( get_local_file=get_local_file, get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename):", "prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return", "get_remote_file=get_remote_file, make_data_url_png=make_data_url_png, make_data_url_of_type=make_data_url_of_type, encodebase64=encodebase64, ) save_file(result,dest_file) def get_local_file(filename): return open(filename).read() def save_file(data,filename): open(filename,'w').write(data)", "import Template import sys import os import urllib.request def encodebase64(filename): fin = open(filename,", "get_remote_file(url): basename = url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response:", "prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix = 'data:{};base64,'.format(type) return prefix + encodebase64(filename) def", "get_local_file(basename) else: with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\") save_file(html,basename) return html if", "def make_data_url_png(filename): prefix = 'data:image/png;base64,' return prefix + encodebase64(filename) def make_data_url_of_type(filename,type): prefix =", 
"url.split(\"/\")[-1] if os.path.exists(basename): return get_local_file(basename) else: with urllib.request.urlopen(url) as response: html = response.read().decode(\"utf-8\")" ]
[ "<filename>setup.py from distutils.core import setup setup( name='MACD', version='1.0', py_modules=[\"Average\", \"MACD\"], license='MIT', author='<NAME>', author_email='<EMAIL>'", "from distutils.core import setup setup( name='MACD', version='1.0', py_modules=[\"Average\", \"MACD\"], license='MIT', author='<NAME>', author_email='<EMAIL>' )" ]
[ "number of data points. Parameters ---------- ipath: str Input path. tests: List of", "get_spline() or init(). Returns ------- p-value: float P-value for given test. \"\"\" #tests", "data size dependence of the parameters of the gamma distributions representing cumulative Shannon", "init(). Returns ------- p-value: float P-value for given test. \"\"\" #tests = ['chi2',", "tests: spline_func[k] = {} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3)", "\"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI,", "gamma distributions for given number of data points. Parameters ---------- Ns: int Number", "alpha, beta, I0) elif test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\",", "gamma distribution. Returns ------- cdf: float Value of Shannon information \"\"\" cdf =", "= scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters", "distribution function of the Shannon information given by gamma distribution. Parameters ---------- SI:", "distributions for given number of data points. Parameters ---------- Ns: int Number of", "init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. Parameters ---------- gamma_params_ipath: str Input path. Returns", "Dictionary of spline functions. Output of get_spline() or init(). Returns ------- p-value: float", "\"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma", "of get_spline() or init(). 
\"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func", "tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for B-splines representing :math:`\\alpha`,", "information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return", "with the package. ------- gsp_dir: str directory path containing gamma spline parameter files", "function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float Shannon information value. number_data_points:", "alpha, beta, I0): \"\"\" Returns cumulative distribution function of the Shannon information given", "of spline functions. Output of get_spline() or init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func", "or init(). Returns ------- p-value: float P-value for given test. \"\"\" p_value =", "'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o`", "Load knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the", "iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution function", "Dictionary containing knots and coefficients of B-splines for all tests and parameters of", "spline_func: dict Dictionary of spline functions. Output of get_spline() or init(). Returns -------", "number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using the gamma distribution", "the Shannon information given by gamma distribution. Parameters ---------- SI: float or array-like", "Output of get_spline() or init(). 
\"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return", "\"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns)", "as functions of :math:`\\log_{10} N`, where :math:`N` is the number of data points.", "number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using gamma disribuiton approximation", "= ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha = 0.5", "\"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\": alpha,", "parameters of the shifted gamma disributions. Ouput of load_spline_parameters(). tests: List of str", "def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using gamma", "\"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not", "of shifted gamma distributions for given number of data points. Parameters ---------- Ns:", "- 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\":", "information distribution. Parameters ---------- SI: float Shannon information value. number_data_points: int Number of", "na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]:", "test: str Name of statistical test. spline_func: dict Dictionary of spline functions. Output", "-np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test ==", "str (optional) Names of tests. 
Returns ------- spline_func: dict Dictionary of spline functions.", "loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function", "gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for", "using gamma disribuiton approximation of Shannon information distribution. Parameters ---------- SI: float Shannon", "points. test: str Name of statistical test. spline_func: dict Dictionary of spline functions.", "= cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\": alpha, beta, I0 =", "spline parameter files that come bundled with the package. ------- gsp_dir: str directory", "in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath,", "in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]: iname", "(location) parameter of the gamma distribution. Returns ------- cdf: float Value of Shannon", "the gamma distribution. Returns ------- cdf: float Value of Shannon information \"\"\" cdf", "if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found at \" +", "{} for k in tests: spline_func[k] = {} for i in range(3): spline_func[k][nam[i]]", "float P-value for given test. \"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp']", "\"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return", "of Biophysics, Frankfurt am Main, Germany # Released under the MIT Licence, see", "shifted gamma disributions. \"\"\" spline_par = {} for k in tests: spline_par[k] =", "of the shifted gamma disributions. Ouput of load_spline_parameters(). 
tests: List of str (optional)", "os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found at", "spline_par = {} for k in tests: spline_par[k] = {} for na in", "str Input path. tests: List of str (optional) Names of tests, for which", "= get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for", "number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\": alpha, beta,", "gamma distribution approximation of the Shannon information distribution. Wrapper function for function cumulative(SI,", "-1.\") return -1. return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values", "disributions as functions of :math:`\\log_{10} N`, where :math:`N` is the number of data", "Returns ------- spline_func: dict Dictionary of spline functions. \"\"\" nam = [\"alpha\", \"beta\",", "gamma disribution. I0: float Shift (location) parameter of the gamma distribution. \"\"\" log_Ns", "Parameters ---------- Ns: int Number of data points. test: str Name of test.", "Calculate p-values for given test using the gamma distribution approximation of the Shannon", "Names of tests. Returns ------- spline_func: dict Dictionary of spline functions. \"\"\" nam", "\"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {} for k in tests:", "str Input path. Returns ------- spline_func: dict Dictionary of spline functions. Output of", "for k in tests: spline_par[k] = {} for na in [\"alpha\", \"beta\", \"I0\"]:", "of tests. Returns ------- spline_func: dict Dictionary of spline functions. \"\"\" nam =", "alpha = 0.5 beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value", "# Released under the MIT Licence, see the file LICENSE.txt. import os import", "disribution. I0: float Shift (location) parameter of the gamma distribution. 
\"\"\" log_Ns =", "List of str (optional) Names of tests. Returns ------- spline_func: dict Dictionary of", "_get_package_gsp(): \"\"\" Return the directory path containing gamma spline parameter files that come", "of data points. test: str Name of test. spline_func: dict Dictionary of spline", "spline function objects for the data size dependence of the parameters of the", "information distribution. Wrapper function for function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI:", "of data points. Parameters ---------- ipath: str Input path. tests: List of str", "containing gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\")", "<NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany # Released under", "= \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def", "shifted gamma disributions as functions of :math:`\\log_{10} N`, where :math:`N` is the number", "by gamma distribution. Parameters ---------- SI: float or array-like Shannon information alpha: float", "spline functions. Output of get_spline() or init(). Returns ------- p-value: float P-value for", "Released under the MIT Licence, see the file LICENSE.txt. import os import numpy", "spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func):", "spline_func): \"\"\" Calculate p-values for given test using the gamma distribution approximation of", "the shifted gamma disributions. Ouput of load_spline_parameters(). tests: List of str (optional) Names", "np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution", "the gamma distributions representing cumulative Shannon information distribution functions. 
Parameters ---------- spline_par: dict", "Return the directory path containing gamma spline parameter files that come bundled with", "load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate", "bundled with the package. ------- gsp_dir: str directory path containing gamma spline parameter", "dependence of the parameters of the gamma distributions representing cumulative Shannon information distribution", "Parameters ---------- ipath: str Input path. tests: List of str (optional) Names of", "of the gamma distribution. Returns ------- cdf: float Value of Shannon information \"\"\"", "the directory path containing gamma spline parameter files that come bundled with the", "parameters of the gamma distributions representing cumulative Shannon information distribution functions. Parameters ----------", "------- spline_func: dict Dictionary of spline functions. Output of get_spline() or init(). \"\"\"", "file LICENSE.txt. import os import numpy as np from scipy.stats import gamma as", "scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of", "spline parameter directory not found at \" + gsp_dir) else: return gsp_dir def", "available!\") print(\"Exiting. Returning -1.\") return -1. return p_value def get_p_value(SI, number_data_points, test, spline_func):", "gamma distribution. 
\"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0", "coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions", "== \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha,", "import os import numpy as np from scipy.stats import gamma as gamma_dist import", "spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\": alpha, beta,", "------- alpha: float Shape parameter of the gamma disribution. beta: float Inverser scale", "functions. Parameters ---------- spline_par: dict Dictionary containing knots and coefficients of B-splines for", "shifted gamma disributions. Ouput of load_spline_parameters(). tests: List of str (optional) Names of", "Output of get_spline() or init(). Returns ------- alpha: float Shape parameter of the", "Returns ------- spline_func: dict Dictionary of spline functions. Output of get_spline() or init().", "get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given", "p-value: float P-value for given test. \"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h',", "I0) elif test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value", "the package. ------- gsp_dir: str directory path containing gamma spline parameter files \"\"\"", "the gamma disribution. I0: float Shift (location) parameter of the gamma distribution. Returns", "Shift (location) parameter of the gamma distribution. 
Returns ------- cdf: float Value of", "Max Planck Institute of Biophysics, Frankfurt am Main, Germany # Released under the", "= os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter", "str Name of statistical test. spline_func: dict Dictionary of spline functions. Output of", "p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\": alpha, beta, I0", "\"\"\" Returns spline function objects for the data size dependence of the parameters", "\"\"\" Load knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of", "B-splines for all tests and parameters of the shifted gamma disributions. Ouput of", "gsp_dir: str directory path containing gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__))", "------- spline_par: dict Dictionary containing knots and coefficients of B-splines for all tests", "spline_func): \"\"\" Returns parameters of shifted gamma distributions for given number of data", "distribution. \"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 =", "of the Shannon information distribution. Wrapper function for function cumulative(SI, number_data_points, test, spline_func)", "function of the Shannon information given by gamma distribution. Parameters ---------- SI: float", "tests, for which paramaters are read in. Names identify the corresponding files. 
Returns", "k in tests: spline_func[k] = {} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"],", "spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\": alpha, beta,", "# Copyright (c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main,", "cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points,", "cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using gamma disribuiton", "approximation of the Shannon information distribution. Wrapper function for function cumulative(SI, number_data_points, test,", "0.5 beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI,", "in tests: spline_par[k] = {} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] =", "= {} for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k,", "of the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta =", "Shannon information value. number_data_points: int Number of data points. test: str Name of", "-1. return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given", "of tests, for which paramaters are read in. Names identify the corresponding files.", "float Shape parameter of the gamma disribution. beta: float Inverser scale parameter of", "tests and parameters of the shifted gamma disributions. Ouput of load_spline_parameters(). tests: List", "gamma distributions representing cumulative Shannon information distribution functions. 
Parameters ---------- spline_par: dict Dictionary", "test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI,", "cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects for", "directory not found at \" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h',", "test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI,", "= get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test", "Name of statistical test. spline_func: dict Dictionary of spline functions. Output of get_spline()", "\"I0\"] spline_func = {} for k in tests: spline_func[k] = {} for i", "p-values for given test using gamma disribuiton approximation of Shannon information distribution. Parameters", "= 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta,", "k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0):", "of spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {} for", "gamma disribuiton approximation of Shannon information distribution. 
Parameters ---------- SI: float Shannon information", "the data size dependence of the parameters of the gamma distributions representing cumulative", "= spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object.", "alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0", "I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test", "the shifted gamma disributions. \"\"\" spline_par = {} for k in tests: spline_par[k]", "\" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\"", "as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the directory path containing gamma", "files that come bundled with the package. ------- gsp_dir: str directory path containing", "identify the corresponding files. Returns ------- spline_par: dict Dictionary containing knots and coefficients", "parameter of the gamma disribution. beta: float Inverser scale parameter of the gamma", "iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par", "of the shifted gamma disributions. \"\"\" spline_par = {} for k in tests:", "------- cdf: float Value of Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI,", "parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir):", "Dictionary of spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {}", "Names identify the corresponding files. 
Returns ------- spline_par: dict Dictionary containing knots and", "alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0)", "at \" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']):", "Returns parameters of shifted gamma distributions for given number of data points. Parameters", "elif test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value =", "cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points,", "Returns spline function objects for the data size dependence of the parameters of", "'chi2_hp'] if test == \"chi2\": alpha = 0.5 beta = 1. I0 =", "test, spline_func): \"\"\" Calculate p-values for given test using gamma disribuiton approximation of", "Returns ------- p-value: float P-value for given test. \"\"\" p_value = cumulative(SI, number_data_points,", "k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted gamma", "---------- gamma_params_ipath: str Input path. Returns ------- spline_func: dict Dictionary of spline functions.", "all tests and parameters of the shifted gamma disributions. Ouput of load_spline_parameters(). tests:", "value. number_data_points: int Number of data points. test: str Name of statistical test.", "return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects", "gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots", "Germany # Released under the MIT Licence, see the file LICENSE.txt. import os", "for k in tests: spline_func[k] = {} for i in range(3): spline_func[k][nam[i]] =", "the number of data points. Parameters ---------- ipath: str Input path. 
tests: List", "directory path containing gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir =", "\"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf", "for given test using gamma disribuiton approximation of Shannon information distribution. Parameters ----------", "I0) elif test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value", "dict Dictionary containing knots and coefficients of B-splines for all tests and parameters", "Licence, see the file LICENSE.txt. import os import numpy as np from scipy.stats", "and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma", "Input path. Returns ------- spline_func: dict Dictionary of spline functions. Output of get_spline()", "scale parameter of the gamma disribution. I0: float Shift (location) parameter of the", "os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory", "cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points,", "float or array-like Shannon information alpha: float Shape parameter of the gamma disribution.", "alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return", "Dictionary of spline functions. Output of get_spline() or init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath)", "spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\": alpha, beta,", "cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf def", "na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\"", "Dictionary of spline functions. 
Output of get_spline() or init(). Returns ------- alpha: float", "and parameters of the shifted gamma disributions. Ouput of load_spline_parameters(). tests: List of", "= get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test ==", "of get_spline() or init(). Returns ------- alpha: float Shape parameter of the gamma", "beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif", "the corresponding files. Returns ------- spline_par: dict Dictionary containing knots and coefficients of", "knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted", "points. Parameters ---------- ipath: str Input path. tests: List of str (optional) Names", "of data points. test: str Name of statistical test. spline_func: dict Dictionary of", "beta, I0) elif test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func)", "cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\")", "cumulative distribution function of the Shannon information given by gamma distribution. Parameters ----------", "spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {} for k", "I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. Parameters ---------- gamma_params_ipath: str Input", "scipy def _get_package_gsp(): \"\"\" Return the directory path containing gamma spline parameter files", "k in tests: spline_par[k] = {} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na]", "for given test. 
\"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test", "(tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta,", "------- p-value: float P-value for given test. \"\"\" p_value = cumulative(SI, number_data_points, test,", "functions. Output of get_spline() or init(). Returns ------- alpha: float Shape parameter of", "test == \"chi2\": alpha = 0.5 beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points -", "where :math:`N` is the number of data points. Parameters ---------- ipath: str Input", "parameter of the gamma disribution. I0: float Shift (location) parameter of the gamma", "Name of test. spline_func: dict Dictionary of spline functions. Output of get_spline() or", "= cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\": alpha, beta, I0 =", "path containing gamma spline parameter files that come bundled with the package. -------", "scipy.stats import gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the directory", "spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted gamma distributions for", "path. Returns ------- spline_func: dict Dictionary of spline functions. Output of get_spline() or", "test, spline_func) Parameters ---------- SI: float Shannon information value. number_data_points: int Number of", "for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions as", "get_spline() or init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def", "tests: spline_par[k] = {} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {}", "of the gamma distributions representing cumulative Shannon information distribution functions. 
Parameters ---------- spline_par:", "spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns", "of str (optional) Names of tests, for which paramaters are read in. Names", "float P-value for given test. \"\"\" p_value = cumulative(SI, number_data_points, test, spline_func) return", "spline_func) Parameters ---------- SI: float Shannon information value. number_data_points: int Number of data", "== \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha,", "= 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf def get_spline(spline_par,", "List of str (optional) Names of tests, for which paramaters are read in.", "I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif", "\"beta\", \"I0\"] spline_func = {} for k in tests: spline_func[k] = {} for", "---------- SI: float Shannon information value. number_data_points: int Number of data points. test:", "tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects for the data size", "init(). Returns ------- alpha: float Shape parameter of the gamma disribution. beta: float", ":math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions as functions of :math:`\\log_{10} N`,", "and parameters of the shifted gamma disributions. \"\"\" spline_par = {} for k", "alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0)", "#tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha =", "ipath: str Input path. 
tests: List of str (optional) Names of tests, for", "log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return", "np from scipy.stats import gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return", "= get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test ==", "dict Dictionary of spline functions. Output of get_spline() or init(). \"\"\" spline_par =", "tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] =", "[\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]: iname =", "print(\"Exiting. Returning -1.\") return -1. return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\"", "return -1. return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for", "spline functions. Output of get_spline() or init(). Returns ------- alpha: float Shape parameter", "I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test", "disributions. \"\"\" spline_par = {} for k in tests: spline_par[k] = {} for", "of str (optional) Names of tests. Returns ------- spline_func: dict Dictionary of spline", "test using the gamma distribution approximation of the Shannon information distribution. Wrapper function", "\"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha", "alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. 
Parameters ---------- gamma_params_ipath:", "def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using the", "Institute of Biophysics, Frankfurt am Main, Germany # Released under the MIT Licence,", "function for function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float Shannon information", "I0: float Shift (location) parameter of the gamma distribution. Returns ------- cdf: float", "Shannon information alpha: float Shape parameter of the gamma disribution. beta: float Inverser", "given number of data points. Parameters ---------- Ns: int Number of data points.", "paramaters are read in. Names identify the corresponding files. Returns ------- spline_par: dict", "def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. Parameters ---------- gamma_params_ipath: str Input path.", "Copyright (c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany", "import numpy as np from scipy.stats import gamma as gamma_dist import scipy def", "functions of :math:`\\log_{10} N`, where :math:`N` is the number of data points. Parameters", "{} for k in tests: spline_par[k] = {} for na in [\"alpha\", \"beta\",", "spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution function of the", "Ns: int Number of data points. test: str Name of test. spline_func: dict", "spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns", "Output of get_spline() or init(). Returns ------- p-value: float P-value for given test.", "I0: float Shift (location) parameter of the gamma distribution. \"\"\" log_Ns = np.log10(Ns)", "(optional) Names of tests, for which paramaters are read in. Names identify the", "the file LICENSE.txt. import os import numpy as np from scipy.stats import gamma", "of spline functions. Output of get_spline() or init(). 
Returns ------- p-value: float P-value", "I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error:", "B-splines for all tests and parameters of the shifted gamma disributions. \"\"\" spline_par", "found at \" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple',", "Calculate p-values for given test using gamma disribuiton approximation of Shannon information distribution.", "of Shannon information distribution. Parameters ---------- SI: float Shannon information value. number_data_points: int", "get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\":", "{} for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na)", "for function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float Shannon information value.", "def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for B-splines", "2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\": alpha,", "int Number of data points. test: str Name of statistical test. spline_func: dict", "parameter files that come bundled with the package. ------- gsp_dir: str directory path", "alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0)", "float Shift (location) parameter of the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha", "directory path containing gamma spline parameter files that come bundled with the package.", "------- spline_func: dict Dictionary of spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"]", "given test using the gamma distribution approximation of the Shannon information distribution. 
Wrapper", "cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float Shannon information value. number_data_points: int", "Main, Germany # Released under the MIT Licence, see the file LICENSE.txt. import", "elif test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value =", "test using gamma disribuiton approximation of Shannon information distribution. Parameters ---------- SI: float", "of B-splines for all tests and parameters of the shifted gamma disributions. Ouput", "gamma distribution. Parameters ---------- SI: float or array-like Shannon information alpha: float Shape", "beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. Parameters ---------- gamma_params_ipath: str", "N`, where :math:`N` is the number of data points. Parameters ---------- ipath: str", "of spline functions. Output of get_spline() or init(). Returns ------- alpha: float Shape", "get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\":", "(location) parameter of the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns)", "P-value for given test. \"\"\" p_value = cumulative(SI, number_data_points, test, spline_func) return p_value", "c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted", "not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found at \" + gsp_dir)", "shifted gamma distributions for given number of data points. Parameters ---------- Ns: int", "representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions as functions of", "tests. Returns ------- spline_func: dict Dictionary of spline functions. \"\"\" nam = [\"alpha\",", "corresponding files. 
Returns ------- spline_par: dict Dictionary containing knots and coefficients of B-splines", "return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients", ":math:`N` is the number of data points. Parameters ---------- ipath: str Input path.", "path. tests: List of str (optional) Names of tests, for which paramaters are", "== \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha,", "def _get_package_gsp(): \"\"\" Return the directory path containing gamma spline parameter files that", "\"\"\" Returns cumulative distribution function of the Shannon information given by gamma distribution.", "beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha,", "Parameters ---------- spline_par: dict Dictionary containing knots and coefficients of B-splines for all", "number of data points. Parameters ---------- Ns: int Number of data points. test:", "of statistical test. spline_func: dict Dictionary of spline functions. Output of get_spline() or", "I0) elif test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value", "\"chi2\": alpha = 0.5 beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points))", "or init(). Returns ------- alpha: float Shape parameter of the gamma disribution. beta:", "parameter directory not found at \" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath,", "\"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\"", "float Inverser scale parameter of the gamma disribution. I0: float Shift (location) parameter", "spline function object. Parameters ---------- gamma_params_ipath: str Input path. 
Returns ------- spline_func: dict", "spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not", "in tests: spline_func[k] = {} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"],", "\"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test,", "coefficients of B-splines for all tests and parameters of the shifted gamma disributions.", "RuntimeError(\"gamma spline parameter directory not found at \" + gsp_dir) else: return gsp_dir", "using the gamma distribution approximation of the Shannon information distribution. Wrapper function for", "given by gamma distribution. Parameters ---------- SI: float or array-like Shannon information alpha:", "Wrapper function for function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float Shannon", "Shannon information given by gamma distribution. Parameters ---------- SI: float or array-like Shannon", "alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0)", "Returns ------- spline_par: dict Dictionary containing knots and coefficients of B-splines for all", "for which paramaters are read in. Names identify the corresponding files. Returns -------", "2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany # Released", "Shift (location) parameter of the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha =", "size dependence of the parameters of the gamma distributions representing cumulative Shannon information", "if test == \"chi2\": alpha = 0.5 beta = 1. 
I0 = -np.log(scipy.stats.chi2.pdf(number_data_points", "% (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha,", "nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {} for k in tests: spline_func[k]", ":math:`\\matcal{I}_o` paramters of the shifted gamma disributions as functions of :math:`\\log_{10} N`, where", "beta, I0): \"\"\" Returns cumulative distribution function of the Shannon information given by", "are read in. Names identify the corresponding files. Returns ------- spline_par: dict Dictionary", "B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions as functions", "disributions. Ouput of load_spline_parameters(). tests: List of str (optional) Names of tests. Returns", "test. spline_func: dict Dictionary of spline functions. Output of get_spline() or init(). Returns", "<filename>hplusminus/sid.py # Copyright (c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am", "import gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the directory path", "i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test,", "as np from scipy.stats import gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\"", "\"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found at \"", "= np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha,", "all tests and parameters of the shifted gamma disributions. \"\"\" spline_par = {}", "Parameters ---------- SI: float Shannon information value. 
number_data_points: int Number of data points.", "= {} for k in tests: spline_par[k] = {} for na in [\"alpha\",", "MIT Licence, see the file LICENSE.txt. import os import numpy as np from", "or array-like Shannon information alpha: float Shape parameter of the gamma disribution. beta:", "spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline", "gamma disribution. beta: float Inverser scale parameter of the gamma disribution. I0: float", "range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\"", "test, spline_func): \"\"\" Calculate p-values for given test using the gamma distribution approximation", "Ouput of load_spline_parameters(). tests: List of str (optional) Names of tests. Returns -------", "spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()):", "for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns,", "gamma spline parameter files that come bundled with the package. ------- gsp_dir: str", "of the gamma disribution. I0: float Shift (location) parameter of the gamma distribution.", "Parameters ---------- SI: float or array-like Shannon information alpha: float Shape parameter of", "data points. Parameters ---------- Ns: int Number of data points. test: str Name", "package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline", "Inverser scale parameter of the gamma disribution. 
I0: float Shift (location) parameter of", "not found at \" + gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both',", "Shannon information distribution. Parameters ---------- SI: float Shannon information value. number_data_points: int Number", "or init(). Returns ------- p-value: float P-value for given test. \"\"\" #tests =", "containing gamma spline parameter files that come bundled with the package. ------- gsp_dir:", "---------- spline_par: dict Dictionary containing knots and coefficients of B-splines for all tests", "spline_par: dict Dictionary containing knots and coefficients of B-splines for all tests and", "spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\")", "p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"hpm\": alpha, beta, I0", "of test. spline_func: dict Dictionary of spline functions. Output of get_spline() or init().", "\"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\": alpha,", "= spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises", "= cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\": alpha, beta, I0 =", "Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0)", "\"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" %", "of the gamma disribution. beta: float Inverser scale parameter of the gamma disribution.", "'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`,", "'h_simple', 'both_simple']): \"\"\" Returns spline function objects for the data size dependence of", "get_spline() or init(). 
Returns ------- alpha: float Shape parameter of the gamma disribution.", "print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return -1. return p_value def", "see the file LICENSE.txt. import os import numpy as np from scipy.stats import", "in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def get_gamma_parameters(Ns, test, spline_func):", "functions. Output of get_spline() or init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par)", "the shifted gamma disributions as functions of :math:`\\log_{10} N`, where :math:`N` is the", "files. Returns ------- spline_par: dict Dictionary containing knots and coefficients of B-splines for", "the gamma disribution. I0: float Shift (location) parameter of the gamma distribution. \"\"\"", "test, spline_func): \"\"\" Returns parameters of shifted gamma distributions for given number of", "gamma disributions. \"\"\" spline_par = {} for k in tests: spline_par[k] = {}", "cdf: float Value of Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha,", "else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and", "= {} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp", "spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values", "= cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning", "functions. Output of get_spline() or init(). 
Returns ------- p-value: float P-value for given", "gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the directory path containing gamma spline", "return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test", "come bundled with the package. ------- gsp_dir: str directory path containing gamma spline", "{} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in", "beta: float Inverser scale parameter of the gamma disribution. I0: float Shift (location)", "distributions representing cumulative Shannon information distribution functions. Parameters ---------- spline_par: dict Dictionary containing", "\"\"\" Calculate p-values for given test using the gamma distribution approximation of the", "get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using the gamma", "the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns)", "parameter of the gamma distribution. Returns ------- cdf: float Value of Shannon information", "is the number of data points. Parameters ---------- ipath: str Input path. tests:", "'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha = 0.5 beta = 1.", "for all tests and parameters of the shifted gamma disributions. \"\"\" spline_par =", "alpha, beta, I0) elif test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\",", "dict Dictionary of spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func =", "spline_par[k][na] = {} for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp,", "alpha: float Shape parameter of the gamma disribution. 
beta: float Inverser scale parameter", "p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using", "p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting.", "Frankfurt am Main, Germany # Released under the MIT Licence, see the file", "= np.load(os.path.join(ipath, iname)) return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative", "array-like Shannon information alpha: float Shape parameter of the gamma disribution. beta: float", "= {} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func", "[\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname))", "disribution. beta: float Inverser scale parameter of the gamma disribution. I0: float Shift", "disribution. I0: float Shift (location) parameter of the gamma distribution. Returns ------- cdf:", "(optional) Names of tests. Returns ------- spline_func: dict Dictionary of spline functions. \"\"\"", "'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects for the data size dependence", "float Shannon information value. number_data_points: int Number of data points. test: str Name", "---------- Ns: int Number of data points. test: str Name of test. spline_func:", "distribution approximation of the Shannon information distribution. Wrapper function for function cumulative(SI, number_data_points,", "LICENSE.txt. import os import numpy as np from scipy.stats import gamma as gamma_dist", "Number of data points. test: str Name of statistical test. spline_func: dict Dictionary", "= os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found", "dict Dictionary of spline functions. 
Output of get_spline() or init(). Returns ------- p-value:", "gamma disributions as functions of :math:`\\log_{10} N`, where :math:`N` is the number of", "read in. Names identify the corresponding files. Returns ------- spline_par: dict Dictionary containing", "'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha = 0.5 beta = 1. I0", "Number of data points. test: str Name of test. spline_func: dict Dictionary of", "\"\"\" Return the directory path containing gamma spline parameter files that come bundled", "['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha = 0.5 beta", "Shannon information distribution functions. Parameters ---------- spline_par: dict Dictionary containing knots and coefficients", "gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the directory path containing", "p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"h\": alpha, beta, I0", "def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution function of the Shannon", "spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test using", "given test using gamma disribuiton approximation of Shannon information distribution. Parameters ---------- SI:", "\"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta,", "init(). Returns ------- p-value: float P-value for given test. \"\"\" p_value = cumulative(SI,", "test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI,", "test. \"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\":", "get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted gamma distributions for given number", "of Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta,", "tests and parameters of the shifted gamma disributions. 
\"\"\" spline_par = {} for", "information given by gamma distribution. Parameters ---------- SI: float or array-like Shannon information", "get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else: print(\"Error: Test \\\"%s\\\"", "gamma disribution. I0: float Shift (location) parameter of the gamma distribution. Returns -------", "of the shifted gamma disributions as functions of :math:`\\log_{10} N`, where :math:`N` is", "= get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test ==", "data points. test: str Name of statistical test. spline_func: dict Dictionary of spline", "load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load knots and coefficients for B-splines representing", "{} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return spline_func def", "beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) else:", "'both_simple']): \"\"\" Load knots and coefficients for B-splines representing :math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters", "\"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta,", "str directory path containing gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir", "I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test", "p-value: float P-value for given test. 
\"\"\" p_value = cumulative(SI, number_data_points, test, spline_func)", "= -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test", "spline_func): \"\"\" Calculate p-values for given test using gamma disribuiton approximation of Shannon", "test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI,", ":math:`\\alpha`, :math:`\\beta`, :math:`\\matcal{I}_o` paramters of the shifted gamma disributions as functions of :math:`\\log_{10}", "tests: List of str (optional) Names of tests, for which paramaters are read", "np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta,", "of :math:`\\log_{10} N`, where :math:`N` is the number of data points. Parameters ----------", "beta, I0) elif test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func)", "Biophysics, Frankfurt am Main, Germany # Released under the MIT Licence, see the", "I0): \"\"\" Returns cumulative distribution function of the Shannon information given by gamma", "from scipy.stats import gamma as gamma_dist import scipy def _get_package_gsp(): \"\"\" Return the", "spline_func: dict Dictionary of spline functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func", "get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_hpm\":", "beta, I0) elif test == \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func)", "Value of Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1. /", "which paramaters are read in. Names identify the corresponding files. 
Returns ------- spline_par:", "get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects for the data", "distribution. Wrapper function for function cumulative(SI, number_data_points, test, spline_func) Parameters ---------- SI: float", "Returning -1.\") return -1. return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate", "objects for the data size dependence of the parameters of the gamma distributions", "Returns ------- p-value: float P-value for given test. \"\"\" #tests = ['chi2', 'h',", "'both_simple']): \"\"\" Returns spline function objects for the data size dependence of the", "1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf def get_spline(spline_par, tests=['h',", "Planck Institute of Biophysics, Frankfurt am Main, Germany # Released under the MIT", "distribution functions. Parameters ---------- spline_par: dict Dictionary containing knots and coefficients of B-splines", "= cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\": alpha, beta, I0 =", "the Shannon information distribution. Wrapper function for function cumulative(SI, number_data_points, test, spline_func) Parameters", "that come bundled with the package. ------- gsp_dir: str directory path containing gamma", "functions. \"\"\" nam = [\"alpha\", \"beta\", \"I0\"] spline_func = {} for k in", "dict Dictionary of spline functions. Output of get_spline() or init(). Returns ------- alpha:", "for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for tmp in [\"knots\",", "spline_func = {} for k in tests: spline_func[k] = {} for i in", "for given test using the gamma distribution approximation of the Shannon information distribution.", "of the parameters of the gamma distributions representing cumulative Shannon information distribution functions.", "under the MIT Licence, see the file LICENSE.txt. import os import numpy as", "== \"chi2\": alpha = 0.5 beta = 1. 
I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2,", "int Number of data points. test: str Name of test. spline_func: dict Dictionary", "spline_func[k] = {} for i in range(3): spline_func[k][nam[i]] = scipy.interpolate.BSpline(t=spline_par[k][nam[i]][\"knots\"], c=spline_par[k][nam[i]][\"coeffs\"], k=3) return", "for all tests and parameters of the shifted gamma disributions. Ouput of load_spline_parameters().", "statistical test. spline_func: dict Dictionary of spline functions. Output of get_spline() or init().", "+ gsp_dir) else: return gsp_dir def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Load", "of get_spline() or init(). Returns ------- p-value: float P-value for given test. \"\"\"", "= load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points, test, spline_func): \"\"\"", "\"\"\" spline_par = {} for k in tests: spline_par[k] = {} for na", "[\"alpha\", \"beta\", \"I0\"] spline_func = {} for k in tests: spline_func[k] = {}", "os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not found at \" + gsp_dir) else:", "---------- ipath: str Input path. tests: List of str (optional) Names of tests,", "SI: float or array-like Shannon information alpha: float Shape parameter of the gamma", "float Value of Shannon information \"\"\" cdf = 1. - gamma_dist.cdf(SI, alpha, scale=1.", "= [\"alpha\", \"beta\", \"I0\"] spline_func = {} for k in tests: spline_func[k] =", "beta, I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return -1.", "spline functions. Output of get_spline() or init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func =", "p-values for given test using the gamma distribution approximation of the Shannon information", "scale=1. 
/ beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\"", "I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function", "gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple',", "def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline function objects for the", "cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution function of the Shannon information", "os import numpy as np from scipy.stats import gamma as gamma_dist import scipy", "parameters of the shifted gamma disributions. \"\"\" spline_par = {} for k in", "for tmp in [\"knots\", \"coeffs\"]: iname = \"%s_%s_%s.npy\" % (tmp, k, na) spline_par[k][na][tmp]", "'h', 'hpm', 'chi2_h', 'chi2_hp'] if test == \"chi2\": alpha = 0.5 beta =", "= 0.5 beta = 1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value =", "of data points. Parameters ---------- Ns: int Number of data points. test: str", "function object. Parameters ---------- gamma_params_ipath: str Input path. Returns ------- spline_func: dict Dictionary", "paramters of the shifted gamma disributions as functions of :math:`\\log_{10} N`, where :math:`N`", "alpha, beta, I0) elif test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\",", "\\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return -1. return p_value def get_p_value(SI, number_data_points,", "the MIT Licence, see the file LICENSE.txt. import os import numpy as np", "return spline_func def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted gamma distributions", "points. test: str Name of test. spline_func: dict Dictionary of spline functions. Output", "\"\"\" Initialises spline function object. 
Parameters ---------- gamma_params_ipath: str Input path. Returns -------", "the gamma distribution approximation of the Shannon information distribution. Wrapper function for function", "Shannon information distribution. Wrapper function for function cumulative(SI, number_data_points, test, spline_func) Parameters ----------", "Returns cumulative distribution function of the Shannon information given by gamma distribution. Parameters", ":math:`\\log_{10} N`, where :math:`N` is the number of data points. Parameters ---------- ipath:", "in. Names identify the corresponding files. Returns ------- spline_par: dict Dictionary containing knots", "alpha, scale=1. / beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']):", "beta, I0) elif test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func)", "Names of tests, for which paramaters are read in. Names identify the corresponding", "p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\": alpha, beta, I0", "str Name of test. spline_func: dict Dictionary of spline functions. Output of get_spline()", "spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. Parameters", "number_data_points: int Number of data points. test: str Name of statistical test. spline_func:", "Shape parameter of the gamma disribution. beta: float Inverser scale parameter of the", "float Shift (location) parameter of the gamma distribution. Returns ------- cdf: float Value", "Initialises spline function object. Parameters ---------- gamma_params_ipath: str Input path. Returns ------- spline_func:", "beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns spline", "or init(). 
\"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI,", "\"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\": alpha,", "Returns ------- cdf: float Value of Shannon information \"\"\" cdf = 1. -", "elif test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both_simple\", spline_func) p_value =", "and coefficients of B-splines for all tests and parameters of the shifted gamma", "the parameters of the gamma distributions representing cumulative Shannon information distribution functions. Parameters", "Parameters ---------- gamma_params_ipath: str Input path. Returns ------- spline_func: dict Dictionary of spline", "cumulative Shannon information distribution functions. Parameters ---------- spline_par: dict Dictionary containing knots and", "gamma_params_ipath: str Input path. Returns ------- spline_func: dict Dictionary of spline functions. Output", "am Main, Germany # Released under the MIT Licence, see the file LICENSE.txt.", "approximation of Shannon information distribution. Parameters ---------- SI: float Shannon information value. number_data_points:", "P-value for given test. \"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if", "object. Parameters ---------- gamma_params_ipath: str Input path. Returns ------- spline_func: dict Dictionary of", "return spline_par def cumulative_SID_gamma(SI, alpha, beta, I0): \"\"\" Returns cumulative distribution function of", "return p_value def get_p_value(SI, number_data_points, test, spline_func): \"\"\" Calculate p-values for given test", "load_spline_parameters(). tests: List of str (optional) Names of tests. Returns ------- spline_func: dict", "of load_spline_parameters(). tests: List of str (optional) Names of tests. 
Returns ------- spline_func:", "import scipy def _get_package_gsp(): \"\"\" Return the directory path containing gamma spline parameter", "distribution. Parameters ---------- SI: float Shannon information value. number_data_points: int Number of data", "path containing gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir,", "parameters of shifted gamma distributions for given number of data points. Parameters ----------", "== \"hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha,", "------- gsp_dir: str directory path containing gamma spline parameter files \"\"\" package_dir =", "not available!\") print(\"Exiting. Returning -1.\") return -1. return p_value def get_p_value(SI, number_data_points, test,", "number_data_points, test, spline_func) Parameters ---------- SI: float Shannon information value. number_data_points: int Number", "spline_par[k] = {} for na in [\"alpha\", \"beta\", \"I0\"]: spline_par[k][na] = {} for", "else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return -1. return p_value", "the gamma disribution. beta: float Inverser scale parameter of the gamma disribution. I0:", "1. I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points)) p_value = cumulative_SID_gamma(SI, alpha, beta, I0)", "for the data size dependence of the parameters of the gamma distributions representing", "of the Shannon information given by gamma distribution. Parameters ---------- SI: float or", "package. ------- gsp_dir: str directory path containing gamma spline parameter files \"\"\" package_dir", "\"\"\" Calculate p-values for given test using gamma disribuiton approximation of Shannon information", "containing knots and coefficients of B-splines for all tests and parameters of the", "I0) else: print(\"Error: Test \\\"%s\\\" not available!\") print(\"Exiting. 
Returning -1.\") return -1. return", "init(). \"\"\" spline_par = load_spline_parameters(gamma_params_ipath) spline_func = get_spline(spline_par) return spline_func def cumulative(SI, number_data_points,", "data points. test: str Name of test. spline_func: dict Dictionary of spline functions.", "knots and coefficients of B-splines for all tests and parameters of the shifted", "elif test == \"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value =", "def get_gamma_parameters(Ns, test, spline_func): \"\"\" Returns parameters of shifted gamma distributions for given", "points. Parameters ---------- Ns: int Number of data points. test: str Name of", "get_spline() or init(). Returns ------- p-value: float P-value for given test. \"\"\" p_value", "gamma spline parameter files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if", "spline_func: dict Dictionary of spline functions. Output of get_spline() or init(). \"\"\" spline_par", "beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif", "(c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany #", "---------- SI: float or array-like Shannon information alpha: float Shape parameter of the", "information distribution functions. Parameters ---------- spline_par: dict Dictionary containing knots and coefficients of", "------- p-value: float P-value for given test. \"\"\" #tests = ['chi2', 'h', 'hpm',", "cumulative_SID_gamma(SI, alpha, beta, I0) elif test == \"chi2_h\": alpha, beta, I0 = get_gamma_parameters(number_data_points,", "return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\" Initialises spline function object. 
Parameters ----------", "numpy as np from scipy.stats import gamma as gamma_dist import scipy def _get_package_gsp():", "files \"\"\" package_dir = os.path.dirname(os.path.abspath(__file__)) gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise", "gamma disributions. Ouput of load_spline_parameters(). tests: List of str (optional) Names of tests.", "of B-splines for all tests and parameters of the shifted gamma disributions. \"\"\"", "disribuiton approximation of Shannon information distribution. Parameters ---------- SI: float Shannon information value.", "beta, I0 = get_gamma_parameters(number_data_points, \"h\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta, I0) elif", "Input path. tests: List of str (optional) Names of tests, for which paramaters", "Returns ------- alpha: float Shape parameter of the gamma disribution. beta: float Inverser", "function objects for the data size dependence of the parameters of the gamma", "given test. \"\"\" #tests = ['chi2', 'h', 'hpm', 'chi2_h', 'chi2_hp'] if test ==", "raise RuntimeError(\"gamma spline parameter directory not found at \" + gsp_dir) else: return", "distribution. Returns ------- cdf: float Value of Shannon information \"\"\" cdf = 1.", "\"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta,", "information alpha: float Shape parameter of the gamma disribution. beta: float Inverser scale", "SI: float Shannon information value. number_data_points: int Number of data points. test: str", "for given number of data points. Parameters ---------- Ns: int Number of data", "= {} for k in tests: spline_func[k] = {} for i in range(3):", "parameter of the gamma distribution. \"\"\" log_Ns = np.log10(Ns) alpha = spline_func[test][\"alpha\"](log_Ns) beta", "information value. number_data_points: int Number of data points. 
test: str Name of statistical", "tests: List of str (optional) Names of tests. Returns ------- spline_func: dict Dictionary", "test: str Name of test. spline_func: dict Dictionary of spline functions. Output of", "I0) elif test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\", spline_func) p_value", "\"\"\" Returns parameters of shifted gamma distributions for given number of data points.", "beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def init(gamma_params_ipath=_get_package_gsp()): \"\"\"", "distribution. Parameters ---------- SI: float or array-like Shannon information alpha: float Shape parameter", "gsp_dir = os.path.join(package_dir, \"gsp\") if not os.path.exists(gsp_dir): raise RuntimeError(\"gamma spline parameter directory not", "str (optional) Names of tests, for which paramaters are read in. Names identify", "\"h\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"h_simple\", spline_func) p_value = cumulative_SID_gamma(SI, alpha, beta,", "Test \\\"%s\\\" not available!\") print(\"Exiting. Returning -1.\") return -1. return p_value def get_p_value(SI,", "data points. Parameters ---------- ipath: str Input path. tests: List of str (optional)", "- gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both',", "alpha, beta, I0) elif test == \"chi2_hpm\": alpha, beta, I0 = get_gamma_parameters(number_data_points, \"both\",", "representing cumulative Shannon information distribution functions. Parameters ---------- spline_par: dict Dictionary containing knots", "= spline_func[test][\"alpha\"](log_Ns) beta = spline_func[test][\"beta\"](log_Ns) I0 = spline_func[test][\"I0\"](log_Ns) return alpha, beta, I0 def", "/ beta, loc=I0) return cdf def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']): \"\"\" Returns" ]
[ "Textarea, TextInput from .models import Review from django import forms from django.contrib.auth.models import", ".models import Review from django import forms from django.contrib.auth.models import User # Form", "# Form to take display to take user's review class ReviewForm(ModelForm): class Meta:", "class ReviewForm(ModelForm): class Meta: model = Review fields = ['rating', 'comment'] #user_name =", "take user's review class ReviewForm(ModelForm): class Meta: model = Review fields = ['rating',", "TextInput from .models import Review from django import forms from django.contrib.auth.models import User", "import ModelForm, Textarea, TextInput from .models import Review from django import forms from", "widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form):", "from .models import Review from django import forms from django.contrib.auth.models import User #", "model = Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets =", "fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' :", "import forms from django.contrib.auth.models import User # Form to take display to take", "ReviewForm(ModelForm): class Meta: model = Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder':", "'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username = forms.CharField(widget=forms.TextInput(attrs={'class':'forminput'}))", "take display to take user's review class ReviewForm(ModelForm): class Meta: model = Review", "forms from django.contrib.auth.models import User # Form to take display to take user's", "'comment'] #user_name = 
forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name'", "Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username = forms.CharField(widget=forms.TextInput(attrs={'class':'forminput'})) password =", "to take display to take user's review class ReviewForm(ModelForm): class Meta: model =", "to take user's review class ReviewForm(ModelForm): class Meta: model = Review fields =", "= forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,})", "Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment'", "Review from django import forms from django.contrib.auth.models import User # Form to take", "import User # Form to take display to take user's review class ReviewForm(ModelForm):", "from django.forms import ModelForm, Textarea, TextInput from .models import Review from django import", "Meta: model = Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets", "= ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35,", "django.contrib.auth.models import User # Form to take display to take user's review class", "forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) }", "{ 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username =", "display to take user's review class 
ReviewForm(ModelForm): class Meta: model = Review fields", "= Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = {", "'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username = forms.CharField(widget=forms.TextInput(attrs={'class':'forminput'})) password = forms.CharField(widget=forms.PasswordInput)", "import Review from django import forms from django.contrib.auth.models import User # Form to", "from django import forms from django.contrib.auth.models import User # Form to take display", "User # Form to take display to take user's review class ReviewForm(ModelForm): class", "['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}),", "'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class", "review class ReviewForm(ModelForm): class Meta: model = Review fields = ['rating', 'comment'] #user_name", "#user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'})) widgets = { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' :", "= { 'comment' : Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username", "django.forms import ModelForm, Textarea, TextInput from .models import Review from django import forms", ": Textarea(attrs={'cols':35, 'rows':10}), #'user_name' : TextInput(attrs={'placeholder':User.username,}) } class LoginForm(forms.Form): username = forms.CharField(widget=forms.TextInput(attrs={'class':'forminput'})) password", "django import forms from django.contrib.auth.models import User # Form to take display to", "user's review class ReviewForm(ModelForm): class Meta: model = Review fields = ['rating', 
'comment']", "from django.contrib.auth.models import User # Form to take display to take user's review", "Form to take display to take user's review class ReviewForm(ModelForm): class Meta: model", "ModelForm, Textarea, TextInput from .models import Review from django import forms from django.contrib.auth.models", "class Meta: model = Review fields = ['rating', 'comment'] #user_name = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'abcd'}))" ]
[ "import Flask from flask_restful import Api from resources.vagas import VagasEmpregoResource App = Flask(__name__)", "o banco e todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api):", "from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False", "VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from sql_alchemy import banco banco.init_app(App) set_routes(Api)", "= Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai", "set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from sql_alchemy import banco", "VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App)", "# Vai criar o banco e todas aas suas tabelas @App.before_first_request def cria_banco():", "Api = Api(App) # Vai criar o banco e todas aas suas tabelas", "App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai criar o banco e todas", "False Api = Api(App) # Vai criar o banco e todas aas suas", "banco e todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource(", "banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from sql_alchemy", "def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__':", "criar o banco e todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def", "App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 
Api = Api(App) #", "= Api(App) # Vai criar o banco e todas aas suas tabelas @App.before_first_request", "= False Api = Api(App) # Vai criar o banco e todas aas", "from flask_restful import Api from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] =", "Flask from flask_restful import Api from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI']", "Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai criar", "todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\"", "flask import Flask from flask_restful import Api from resources.vagas import VagasEmpregoResource App =", "Api(App) # Vai criar o banco e todas aas suas tabelas @App.before_first_request def", "import Api from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS']", "'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai criar o banco e", "aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" )", "= 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai criar o banco", "Vai criar o banco e todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all()", "e todas aas suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource,", "suas tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if", "cria_banco(): banco.create_all() def 
set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from", "App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api = Api(App) # Vai criar o", "Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from sql_alchemy import banco banco.init_app(App)", "@App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ ==", "import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api =", "<gh_stars>0 from flask import Flask from flask_restful import Api from resources.vagas import VagasEmpregoResource", "\"/vagas\" ) if __name__ == '__main__': from sql_alchemy import banco banco.init_app(App) set_routes(Api) App.run(debug=True)", "tabelas @App.before_first_request def cria_banco(): banco.create_all() def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__", "from flask import Flask from flask_restful import Api from resources.vagas import VagasEmpregoResource App", "Api from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] =", "resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db' App.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False Api", "flask_restful import Api from resources.vagas import VagasEmpregoResource App = Flask(__name__) App.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///banco.db'", "def set_routes(Api): Api.add_resource( VagasEmpregoResource, \"/vagas\" ) if __name__ == '__main__': from sql_alchemy import" ]
[ ".items import * from .functions import * from .base_materials import * from .materials", "from .materials import * from .operators import * from .display import * from", "* from .functions import * from .base_materials import * from .materials import *", "import * from .operators import * from .display import * from .setup import", "from .base_materials import * from .materials import * from .operators import * from", ".functions import * from .base_materials import * from .materials import * from .operators", "from .constants import * from .items import * from .functions import * from", "import * from .constants import * from .items import * from .functions import", "from item_engine import * from .constants import * from .items import * from", "* from .base_materials import * from .materials import * from .operators import *", "* from .items import * from .functions import * from .base_materials import *", ".materials import * from .operators import * from .display import * from .setup", "import * from .functions import * from .base_materials import * from .materials import", ".constants import * from .items import * from .functions import * from .base_materials", "from .items import * from .functions import * from .base_materials import * from", "* from .constants import * from .items import * from .functions import *", "from .functions import * from .base_materials import * from .materials import * from", "item_engine import * from .constants import * from .items import * from .functions", ".base_materials import * from .materials import * from .operators import * from .display", "import * from .base_materials import * from .materials import * from .operators import", "import * from .items import * from .functions import * from .base_materials import", "* from .operators import * from .display import * from .setup import *", "import * from .materials import * from .operators import * from .display import", "* from .materials import * from .operators import * 
from .display import *" ]
[ "class Meta: name = \"A\" value: Optional[int] = field( default=None, metadata={ \"required\": True,", "A1: class Meta: name = \"A\" value: Optional[int] = field( default=None, metadata={ \"required\":", "value: Optional[int] = field( default=None, metadata={ \"required\": True, } ) t: Optional[int] =", "\"type\": \"Attribute\", } ) @dataclass class A(A1): class Meta: name = \"a\" namespace", "typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta: name =", "\"derivationMethod\" @dataclass class A1: class Meta: name = \"A\" value: Optional[int] = field(", "dataclasses import dataclass, field from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class", "import dataclass, field from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1:", "True, } ) t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", } )", ") t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", } ) @dataclass class", "= field( default=None, metadata={ \"required\": True, } ) t: Optional[int] = field( default=None,", "field from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta:", "\"A\" value: Optional[int] = field( default=None, metadata={ \"required\": True, } ) t: Optional[int]", "\"Attribute\", } ) @dataclass class A(A1): class Meta: name = \"a\" namespace =", "Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", } ) @dataclass class A(A1): class", "from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta: name", "default=None, metadata={ \"type\": \"Attribute\", } ) @dataclass class A(A1): class Meta: name =", "} ) t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", } ) @dataclass", "metadata={ \"required\": True, } ) t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\",", "field( default=None, metadata={ \"type\": 
\"Attribute\", } ) @dataclass class A(A1): class Meta: name", "Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta: name = \"A\" value:", "\"required\": True, } ) t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", }", "<reponame>tefra/xsdata-w3c-tests from dataclasses import dataclass, field from typing import Optional __NAMESPACE__ = \"derivationMethod\"", "class A1: class Meta: name = \"A\" value: Optional[int] = field( default=None, metadata={", "@dataclass class A1: class Meta: name = \"A\" value: Optional[int] = field( default=None,", "dataclass, field from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class", "__NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta: name = \"A\" value: Optional[int]", "default=None, metadata={ \"required\": True, } ) t: Optional[int] = field( default=None, metadata={ \"type\":", "metadata={ \"type\": \"Attribute\", } ) @dataclass class A(A1): class Meta: name = \"a\"", "import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass class A1: class Meta: name = \"A\"", "t: Optional[int] = field( default=None, metadata={ \"type\": \"Attribute\", } ) @dataclass class A(A1):", "Optional[int] = field( default=None, metadata={ \"required\": True, } ) t: Optional[int] = field(", "} ) @dataclass class A(A1): class Meta: name = \"a\" namespace = \"derivationMethod\"", "field( default=None, metadata={ \"required\": True, } ) t: Optional[int] = field( default=None, metadata={", "Meta: name = \"A\" value: Optional[int] = field( default=None, metadata={ \"required\": True, }", "= \"A\" value: Optional[int] = field( default=None, metadata={ \"required\": True, } ) t:", "from dataclasses import dataclass, field from typing import Optional __NAMESPACE__ = \"derivationMethod\" @dataclass", "name = \"A\" value: Optional[int] = field( default=None, metadata={ \"required\": True, } )", "= field( default=None, metadata={ \"type\": \"Attribute\", } 
) @dataclass class A(A1): class Meta:", "= \"derivationMethod\" @dataclass class A1: class Meta: name = \"A\" value: Optional[int] =" ]
[ "state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1", "class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256) x =", "= nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x, 2,", "sample: return max_action * nn.tanh(mu), log_sig else: pi = mu + random.normal(key, mu.shape)", "actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params", "return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key,", "dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant,", "q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 =", "features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200) x =", "input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action)", "mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -=", "input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params =", "return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng,", "nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class", "nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self, state,", "not sample: return 
max_action * nn.tanh(mu), log_sig else: pi = mu + random.normal(key,", "= nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim,", "apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256)", "build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params)", "log_sig = jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max)", "nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes)", "x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x,", "log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig", "init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return", "import jax from haiku._src.typing import PRNGKey from jax import random import jax.numpy as", "if not sample: return max_action * nn.tanh(mu), log_sig else: pi = mu +", "jax from haiku._src.typing import PRNGKey from jax import random import jax.numpy as jnp", "List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action):", "axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 =", "init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params)", "init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params =", "import numpy 
as onp from typing import List, Tuple from utils import gaussian_likelihood", "= nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action,", "import PRNGKey from jax import random import jax.numpy as jnp import numpy as", "GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ):", "-= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1) return max_action * pi,", "features=2 * action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig", "pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig)", "+ random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi)", "features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None,", "1e-6), axis=1) return max_action * pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32):", "Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1)", "nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params", "nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x)", "init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params)", "input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic", "nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if", 
"nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1) return max_action", "x = nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x)", "x = nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x,", "module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params", "max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor,", "MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x", "def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic,", "def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic,", "features=256) x = nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class", "q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1,", "max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state,", "init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params =", "from typing import List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self,", "pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones)", "jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1) return max_action * pi, log_pi", "nn.elu(q1) q1 = nn.Dense(q1, features=1) 
if Q1: return q1 q2 = nn.Dense(state_action, features=500)", "def apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256) x = nn.relu(x) x", "x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200)", "= nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1: return", "_, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic =", "apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x =", "log_sig else: pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi,", "start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params =", "init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor", "jax import random import jax.numpy as jnp import numpy as onp from typing", "key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes,", "= nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig =", "q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def", "if Q1: return q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 =", "nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x) x", "random import jax.numpy as jnp import numpy as onp from typing import List,", "action], axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1", "nn import jax from haiku._src.typing import PRNGKey from jax import random import 
jax.numpy", "nn.tanh(mu), log_sig else: pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi =", "features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 =", "from flax import nn import jax from haiku._src.typing import PRNGKey from jax import", "return q1, q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None, MPO=False,", "typing import List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x,", "= critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor =", "nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x, features=2 *", "= gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi **", "Q1: return q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2)", "= jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 =", "key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x = nn.LayerNorm(x)", "= nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2", "features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 =", "nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1", "jax.numpy as jnp import numpy as onp from typing import List, Tuple from", "= nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2", "nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2)", "def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant, init_params)", "= nn.Dense(x, features=256) x = nn.relu(x) x 
= nn.Dense(x, features=action_dim) return max_action *", "def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value,", "nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def", "* jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng)", "= mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi", "nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 =", "q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 =", "return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params", "= nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200)", "state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1", "Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng):", "nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return", "nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes)", "x, action_dim, max_action): x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x,", "q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1,", "action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x", "features=500) q1 = 
nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 =", "_, init_params = constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor", "def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action,", "as onp from typing import List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module):", "_, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic =", "+ 1e-6), axis=1) return max_action * pi, log_pi class Constant(nn.Module): def apply(self, start_value,", "log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig if not sample:", "= TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module,", "def apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x", "return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action =", "= nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1)", "= nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1)", "return max_action * nn.tanh(mu), log_sig else: pi = mu + random.normal(key, mu.shape) *", "jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return", "x = nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module):", "= jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if", "= constant.init(init_rng) return nn.Model(constant, init_params) def 
build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim,", "q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2)", "init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def", "Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1)", "nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action", "from haiku._src.typing import PRNGKey from jax import random import jax.numpy as jnp import", "= jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig if not sample: return", "q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 =", "features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module):", "q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=500) q2", "= nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=256) q2 =", "return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _,", "q2 = nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action,", "log_sig if not sample: return max_action * nn.tanh(mu), log_sig else: pi = mu", "nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes)", "q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=256) q2", "= actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = 
TD3Critic.partial() _,", "init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial()", "log_sig_min, log_sig_max) if MPO: return mu, log_sig if not sample: return max_action *", "q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def", "DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action,", "nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig if not", "nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1", "def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng,", "nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=256)", "apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500)", "* nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action],", "gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2)", "critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params", "Q1: return q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2,", "log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x)", "return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = 
GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _,", "if MPO: return mu, log_sig if not sample: return max_action * nn.tanh(mu), log_sig", "init_params = constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor =", "import jax.numpy as jnp import numpy as onp from typing import List, Tuple", "haiku._src.typing import PRNGKey from jax import random import jax.numpy as jnp import numpy", "nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if", "features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action =", "= nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action,", "pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1)", "x = nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x, features=2 * action_dim)", "= nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu,", "pi ** 2) + 1e-6), axis=1) return max_action * pi, log_pi class Constant(nn.Module):", "= nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500)", "= nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x) x", "import random import jax.numpy as jnp import numpy as onp from typing import", "q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1:", "= nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action,", "= nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=500) q2 =", "mu, log_sig if not sample: return max_action * nn.tanh(mu), log_sig else: pi =", "jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig if not sample: return max_action", "= nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply(", 
"action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return", "nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x) x =", "return mu, log_sig if not sample: return max_action * nn.tanh(mu), log_sig else: pi", "nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return", "features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2", "flax import nn import jax from haiku._src.typing import PRNGKey from jax import random", "numpy as onp from typing import List, Tuple from utils import gaussian_likelihood class", "= nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return", "= DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim,", "= nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=action_dim)", "features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1) if Q1: return q1 q2", "nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x = nn.Dense(x, features=200) x", "nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 =", "state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1", "x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state,", "= nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu, log_sig if", "action], axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256)", "features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action", "log_pi class Constant(nn.Module): 
def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return", "jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1,", "mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi =", "class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2,", "init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes):", "nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params", "q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2,", "action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig,", "onp from typing import List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def", "TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 =", "x = nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x,", "= nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1) return", "build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params)", "q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1,", "import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256)", "= Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant, init_params) 
def build_td3_actor_model(input_shapes, action_dim, max_action,", "PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng):", "init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return", "axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return mu,", "build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes)", "actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def", "apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256) x = nn.relu(x) x =", "TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256) x = nn.relu(x)", "x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action", "= TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes,", "nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu, log_sig", "mu, log_sig = jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min,", "import nn import jax from haiku._src.typing import PRNGKey from jax import random import", "jnp import numpy as onp from typing import List, Tuple from utils import", "nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x, 2, axis=-1)", "features=200) x = nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu, log_sig =", "else: pi = mu + 
random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu,", "init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes,", "(1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value)", "nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2", "sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x =", "= nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2", "build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _, init_params = constant.init(init_rng) return nn.Model(constant, init_params) def", "x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=256) x =", "return q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2", "2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO: return", "q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20,", "return q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256)", "= nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1", "jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1)", "TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng):", "nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1: 
return q1", "return q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state,", "nn.Dense(q1, features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2)", "max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor,", "nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply( self,", "class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value", "nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=action_dim) return", "_, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey,", "DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 =", "def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module,", "= nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def", "action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1 =", "start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def", "from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x =", "nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant = Constant.partial(start_value=start_value) _,", "nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = 
nn.Dense(q1, features=500) q1", "MPO: return mu, log_sig if not sample: return max_action * nn.tanh(mu), log_sig else:", "): x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x =", "utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x = nn.Dense(x,", "state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=256) q1", "return max_action * pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value =", "return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng,", "Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value *", "class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1", "q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2", "nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2", "dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value,", "jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig) log_sig = jnp.clip(log_sig, log_sig_min, log_sig_max) if MPO:", "critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes,", "= nn.tanh(x) x = nn.Dense(x, features=200) x = nn.elu(x) x = nn.Dense(x, features=2", "features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2", "action_dim, max_action): x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=256)", "= 
nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1)", "axis=1) q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1", "max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x, features=200) x =", "jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 -", "* pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,),", "q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2,", "init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return", "input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params =", "* jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1", "log_sig_max) if MPO: return mu, log_sig if not sample: return max_action * nn.tanh(mu),", "= critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key: PRNGKey, input_shapes): _,", "apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype)", "axis=1) return max_action * pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value", "input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial() _, init_params =", "q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2,", "q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1)", "x = 
nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig", "features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 =", "q1 = nn.Dense(state_action, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 =", "= jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1 = nn.LayerNorm(q1) q1 =", "build_model(module: nn.Module, key: PRNGKey, input_shapes): _, init_params = module.init_by_shape(key, input_shapes) return nn.Model(module, init_params)", "nn.relu(x) x = nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self,", "gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x = nn.Dense(x, features=256) x", "as jnp import numpy as onp from typing import List, Tuple from utils", "q1 = nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1,", "nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class", "init_rng): critic = DoubleCritic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def", "action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1 = nn.Dense(state_action, features=500) q1 =", "* nn.tanh(mu), log_sig else: pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi", "x = nn.elu(x) x = nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x,", "q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1,", "features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module):", "self, x, action_dim, max_action, key=None, MPO=False, sample=False, log_sig_min=-20, log_sig_max=2, ): x = nn.Dense(x,", "2) + 1e-6), axis=1) return max_action * pi, log_pi class Constant(nn.Module): def apply(self,", "constant = Constant.partial(start_value=start_value) _, 
init_params = constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim,", "log_sig_max=2, ): x = nn.Dense(x, features=200) x = nn.LayerNorm(x) x = nn.tanh(x) x", "** 2) + 1e-6), axis=1) return max_action * pi, log_pi class Constant(nn.Module): def", "log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6), axis=1) return max_action *", "from jax import random import jax.numpy as jnp import numpy as onp from", "import List, Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim,", "max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic", "= self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant", "q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1) q1 = nn.Dense(q1, features=1) if Q1:", "log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi", "q2 = nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply( self, x,", "q1, q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action, key=None, MPO=False, sample=False,", "max_action * pi, log_pi class Constant(nn.Module): def apply(self, start_value, dtype=jnp.float32): value = self.param(\"value\",", "_, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng):", "nn.Dense(x, features=2 * action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig = nn.softplus(log_sig)", "- pi ** 2) + 1e-6), axis=1) return max_action * pi, log_pi class", "= nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 = nn.Dense(q2, features=500)", "mu, log_sig) pi = nn.tanh(pi) log_pi -= 
jnp.sum(jnp.log(nn.relu(1 - pi ** 2) +", "log_sig) pi = nn.tanh(pi) log_pi -= jnp.sum(jnp.log(nn.relu(1 - pi ** 2) + 1e-6),", "q1 = nn.LayerNorm(q1) q1 = nn.tanh(q1) q1 = nn.Dense(q1, features=500) q1 = nn.elu(q1)", "= nn.relu(q2) q2 = nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self,", "= nn.relu(q1) q1 = nn.Dense(q1, features=256) q1 = nn.relu(q1) q1 = nn.Dense(q1, features=1)", "TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module: nn.Module, key:", "init_params = actor.init_by_shape(init_rng, input_shapes) return nn.Model(actor, init_params) def build_td3_critic_model(input_shapes, init_rng): critic = TD3Critic.partial()", "features=256) x = nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x) x =", "= module.init_by_shape(key, input_shapes) return nn.Model(module, init_params) def build_double_critic_model(input_shapes, init_rng): critic = DoubleCritic.partial() _,", "def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng,", "nn.Dense(x, features=action_dim) return max_action * nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False):", "action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes) return", "critic = TD3Critic.partial() _, init_params = critic.init_by_shape(init_rng, input_shapes) return nn.Model(critic, init_params) def build_model(module:", "= nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2 = nn.Dense(q2, features=256) q2 = nn.relu(q2)", "class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1) q1", "critic.init_by_shape(init_rng, input_shapes) return 
nn.Model(critic, init_params) def build_gaussian_policy_model(input_shapes, action_dim, max_action, init_rng): actor = GaussianPolicy.partial(action_dim=action_dim,", "q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action],", "self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng): constant =", "q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 = nn.tanh(q2) q2 =", "value = self.param(\"value\", (1,), nn.initializers.ones) return start_value * jnp.asarray(value, dtype) def build_constant_model(start_value, init_rng):", "max_action * nn.tanh(mu), log_sig else: pi = mu + random.normal(key, mu.shape) * jnp.exp(log_sig)", "features=1) if Q1: return q1 q2 = nn.Dense(state_action, features=256) q2 = nn.relu(q2) q2", "PRNGKey from jax import random import jax.numpy as jnp import numpy as onp", "max_action): x = nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=256) x", "= nn.Dense(x, features=256) x = nn.relu(x) x = nn.Dense(x, features=256) x = nn.relu(x)", "nn.Dense(q2, features=1) return q1, q2 class GaussianPolicy(nn.Module): def apply( self, x, action_dim, max_action,", "random.normal(key, mu.shape) * jnp.exp(log_sig) log_pi = gaussian_likelihood(pi, mu, log_sig) pi = nn.tanh(pi) log_pi", "if Q1: return q1 q2 = nn.Dense(state_action, features=500) q2 = nn.LayerNorm(q2) q2 =", "= nn.tanh(q2) q2 = nn.Dense(q2, features=500) q2 = nn.elu(q2) q2 = nn.Dense(q2, features=1)", "= nn.Dense(q2, features=1) return q1, q2 class DoubleCritic(nn.Module): def apply(self, state, action, Q1=False):", "constant.init(init_rng) return nn.Model(constant, init_params) def build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action)", "* action_dim) mu, log_sig = jnp.split(x, 2, axis=-1) log_sig = 
nn.softplus(log_sig) log_sig =", "nn.tanh(x) class TD3Critic(nn.Module): def apply(self, state, action, Q1=False): state_action = jnp.concatenate([state, action], axis=1)", "Tuple from utils import gaussian_likelihood class TD3Actor(nn.Module): def apply(self, x, action_dim, max_action): x", "build_td3_actor_model(input_shapes, action_dim, max_action, init_rng): actor = TD3Actor.partial(action_dim=action_dim, max_action=max_action) _, init_params = actor.init_by_shape(init_rng, input_shapes)" ]
[ "设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决", "search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter", "import admin from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 #", "'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启", "ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location',", "list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page =", "会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能", "from django.contrib import admin from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags", "= ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10", "Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) #", "# 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) #", "bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location',", "# list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决 # list_editable = 
('user','location',", "from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display =", "('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 #", "@admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields", "= 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决 # list_editable", "admin from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display", "list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决 #", "class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields =", "('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user',", "<reponame>ChowBu/bootcamp from django.contrib import admin from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): #", "# search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 #", "# list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page", "list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决 # list_editable = ('user','location', 'url','job_title',)", "# 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 
'url','job_title',)", "不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',) # search_fields = ('user','location', 'url','job_title',) #", "10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',) # 开始没有加id进入list_display导致第一项title没法编辑,出现以上问题,后来加了个id在前面解决 # list_editable =", "= ('user','location', 'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter =", "import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管 # list_display = ('id','user','location', 'url','job_title',)", "'url','job_title',) # 设置列表页展示条目数比较小,可提高打开列表页性能 list_per_page = 10 # list_filter是性能杀手,尽量不要开启 # list_filter = ('user', 'post',)", "django.contrib import admin from bootcamp.authentication.models import Profile @admin.register(Profile) class ProfileAdmin(admin.ModelAdmin): # 不能添加tags 会出错,暂时不管" ]
[ "anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod", "import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser =", "an empty output \"\"\" args = ['--input', 'tests/samples/empty'] self.assertEquals(anagram.main(self.parser.parse_args(args)), 'Input dataset is empty')", "\"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an", "Test an empty output \"\"\" args = ['--input', 'tests/samples/empty'] self.assertEquals(anagram.main(self.parser.parse_args(args)), 'Input dataset is", "errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\" args =", "errno import shutil from os.path import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase):", "anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output \"\"\" args = ['--input', 'tests/samples/empty']", "test_negative_threshold(self): \"\"\" Test an negative value for the character threshold \"\"\" args =", "from non-existant file \"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self):", "= anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as", "cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc: if", "if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read from non-existant file", "from os.path import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls):", "read from non-existant file 
\"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def", "character threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\"", "unittest import errno import shutil from os.path import join import anagram.anagram as anagram", "setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except", "\"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an", "threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test", "= ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output", "anagram\"\"\" import unittest import errno import shutil from os.path import join import anagram.anagram", "License: MIT \"\"\" Tests for anagram\"\"\" import unittest import errno import shutil from", "@classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc: if exc.errno != errno.ENOENT:", "Tests for anagram\"\"\" import unittest import errno import shutil from os.path import join", "the character threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self):", "'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value for the", "exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read from non-existant", "def tearDownClass(cls): try: shutil.rmtree('path') except 
OSError as exc: if exc.errno != errno.ENOENT: raise", "non-existant file \"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\"", "# Anagram Utility # License: MIT \"\"\" Tests for anagram\"\"\" import unittest import", "import errno import shutil from os.path import join import anagram.anagram as anagram class", "class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def", "= '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc: if exc.errno", "with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output \"\"\" args =", "an negative value for the character threshold \"\"\" args = ['--count', '-1'] with", "anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path =", "def test_negative_threshold(self): \"\"\" Test an negative value for the character threshold \"\"\" args", "cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError", "Utility # License: MIT \"\"\" Tests for anagram\"\"\" import unittest import errno import", "import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path", "Test an negative value for the character threshold \"\"\" args = ['--count', '-1']", "\"\"\" Test an empty output \"\"\" args = ['--input', 'tests/samples/empty'] self.assertEquals(anagram.main(self.parser.parse_args(args)), 'Input dataset", "except OSError as exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test", "!= errno.ENOENT: raise def 
test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\" args", "tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc: if exc.errno != errno.ENOENT: raise def", "Test read from non-existant file \"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args))", "anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc:", "self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value for the character threshold", "negative value for the character threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError):", "shutil.rmtree('path') except OSError as exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\"", "raise def test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\" args = ['--input',", "@classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try:", "def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path')", "import unittest import errno import shutil from os.path import join import anagram.anagram as", "try: shutil.rmtree('path') except OSError as exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self):", "\"\"\" Test read from non-existant file \"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError):", "['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value for", "value for the character threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): 
anagram.main(self.parser.parse_args(args))", "args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty", "exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\"", "shutil from os.path import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def", "# License: MIT \"\"\" Tests for anagram\"\"\" import unittest import errno import shutil", "args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative", "'/path/to/folder' @classmethod def tearDownClass(cls): try: shutil.rmtree('path') except OSError as exc: if exc.errno !=", "def test_empty_output(self): \"\"\" Test an empty output \"\"\" args = ['--input', 'tests/samples/empty'] self.assertEquals(anagram.main(self.parser.parse_args(args)),", "with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value for the character", "def test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\" args = ['--input', 'tests/samples/test']", "test_empty_output(self): \"\"\" Test an empty output \"\"\" args = ['--input', 'tests/samples/empty'] self.assertEquals(anagram.main(self.parser.parse_args(args)), 'Input", "MIT \"\"\" Tests for anagram\"\"\" import unittest import errno import shutil from os.path", "file \"\"\" args = ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test", "= ['--input', 'tests/samples/test'] with self.assertRaises(IOError): anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value", "join import anagram.anagram as anagram class 
TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options()", "['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output \"\"\"", "anagram.main(self.parser.parse_args(args)) def test_negative_threshold(self): \"\"\" Test an negative value for the character threshold \"\"\"", "Anagram Utility # License: MIT \"\"\" Tests for anagram\"\"\" import unittest import errno", "\"\"\" Tests for anagram\"\"\" import unittest import errno import shutil from os.path import", "OSError as exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read", "'-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output \"\"\" args", "\"\"\" Test an negative value for the character threshold \"\"\" args = ['--count',", "as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder'", "for the character threshold \"\"\" args = ['--count', '-1'] with self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def", "import shutil from os.path import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod", "for anagram\"\"\" import unittest import errno import shutil from os.path import join import", "as exc: if exc.errno != errno.ENOENT: raise def test_incorrect_dict_path(self): \"\"\" Test read from", "TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = anagram.args_options() cls.mock_path = '/path/to/folder' @classmethod def tearDownClass(cls):", "os.path import join import anagram.anagram as anagram class TestAnagram(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser", "test_incorrect_dict_path(self): \"\"\" Test read from non-existant file \"\"\" args = 
['--input', 'tests/samples/test'] with", "self.assertRaises(ValueError): anagram.main(self.parser.parse_args(args)) def test_empty_output(self): \"\"\" Test an empty output \"\"\" args = ['--input'," ]
[ "src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja \"\"\"", "build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return", "build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method", "[x.as_str for x in env_node] else: env_values = [] env = \" \".join(env_values)", "in env_node] else: env_values = [] env = \" \".join(env_values) variables = {", "variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list of targets that are generated", "for given Android build configuration \"\"\" def __init__(self, conf: YamlValue, name: str, build_dir:", "env_node = self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for x in env_node]", "for a reproducible builds. Luckily, there is nothing to do, as Android state", "src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node", "for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture Android Kernel", "rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values = [x.as_str", "&& \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke", "self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return", "from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def get_builder(conf: YamlValue, name: str,", "]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class", "capture_state(self): 
\"\"\" This method should capture Android Kernel state for a reproducible builds.", "build_dir def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None)", "AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules", "to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for", "self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for x in env_node] else: env_values", "List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir,", "nothing to do, as Android state is controlled solely by its repo state.", "= src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\"", "name self.generator = generator self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate", "Generate yocto build rules for ninja \"\"\" cmd = \" && \".join([ \"export", "2021 EPAM Systems \"\"\" Android kernel builder module \"\"\" import os.path from typing", "build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for", "as Android state is controlled solely by its repo state. And repo state", "by its repo state. 
And repo state is captured by repo fetcher code.", "moulin import ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator:", "kernel builder module \"\"\" import os.path from typing import List from moulin.yaml_wrapper import", "self.name = name self.generator = generator self.src_stamps = src_stamps self.build_dir = build_dir def", "env_values = [] env = \" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\":", "Luckily, there is nothing to do, as Android state is controlled solely by", "generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps,", "gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja \"\"\" cmd = \"", "method should capture Android Kernel state for a reproducible builds. Luckily, there is", "{ \"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables)", "is controlled solely by its repo state. 
And repo state is captured by", "in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture Android Kernel state for", "typing import List from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def get_builder(conf:", "EPAM Systems \"\"\" Android kernel builder module \"\"\" import os.path from typing import", "def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if", "if env_node: env_values = [x.as_str for x in env_node] else: env_values = []", "\"\"\" Android kernel builder module \"\"\" import os.path from typing import List from", "given Android build configuration \"\"\" def __init__(self, conf: YamlValue, name: str, build_dir: str,", "= self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for x in env_node] else:", "str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name = name", "x in env_node] else: env_values = [] env = \" \".join(env_values) variables =", "= [] env = \" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env,", "a reproducible builds. Luckily, there is nothing to do, as Android state is", "do, as Android state is controlled solely by its repo state. 
And repo", "build rules for ninja \"\"\" cmd = \" && \".join([ \"export $env\", \"cd", "env_node] else: env_values = [] env = \" \".join(env_values) variables = { \"build_dir\":", "module \"\"\" import os.path from typing import List from moulin.yaml_wrapper import YamlValue from", "class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for given Android build configuration", "builder module \"\"\" import os.path from typing import List from moulin.yaml_wrapper import YamlValue", "\"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class", "# Copyright 2021 EPAM Systems \"\"\" Android kernel builder module \"\"\" import os.path", "YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel", "def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja \"\"\" cmd =", "ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja \"\"\" cmd = \" &&", "str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\"", "ninja \"\"\" cmd = \" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ])", "= conf self.name = name self.generator = generator self.src_stamps = src_stamps self.build_dir =", "description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates", "return targets def get_targets(self): \"Return list of targets that are generated by this", "its repo state. And repo state is captured by repo fetcher code. 
\"\"\"", "\"\"\" import os.path from typing import List from moulin.yaml_wrapper import YamlValue from moulin", "import YamlValue from moulin import ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str,", "\"\"\" Generate yocto build rules for ninja \"\"\" cmd = \" && \".join([", "Kernel state for a reproducible builds. Luckily, there is nothing to do, as", "rules for given Android build configuration \"\"\" def __init__(self, conf: YamlValue, name: str,", "state for a reproducible builds. Luckily, there is nothing to do, as Android", "-c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder", "Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules", "build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name = name self.generator", "rules for ninja \"\"\" cmd = \" && \".join([ \"export $env\", \"cd $build_dir\",", "configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer):", "class generates Ninja rules for given Android build configuration \"\"\" def __init__(self, conf:", "are generated by this build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def", "$env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build", "Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja", "List from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def get_builder(conf: YamlValue, name:", "name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name =", 
"\"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list of targets that", "AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\"", "t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture Android Kernel state", "\"\"\"Generate ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values", "\"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets", "# SPDX-License-Identifier: Apache-2.0 # Copyright 2021 EPAM Systems \"\"\" Android kernel builder module", "AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for given Android build configuration \"\"\"", "self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list of targets that are", "import ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer):", "solely by its repo state. 
And repo state is captured by repo fetcher", "Android build configuration \"\"\" def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps:", "def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf", "class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate", "\".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android", "targets def get_targets(self): \"Return list of targets that are generated by this build\"", "\"\"\" AndroidBuilder class generates Ninja rules for given Android build configuration \"\"\" def", "builds. Luckily, there is nothing to do, as Android state is controlled solely", "targets that are generated by this build\" return [os.path.join(self.build_dir, t.as_str) for t in", "gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node:", "= build_dir def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\",", "else: env_values = [] env = \" \".join(env_values) variables = { \"build_dir\": self.build_dir,", "for ninja \"\"\" cmd = \" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\",", "self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list of", "Copyright 2021 EPAM Systems \"\"\" Android kernel builder module \"\"\" import os.path from", "generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for given Android build", "def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return", "YamlValue from moulin import 
ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps:", "\"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def", "self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules to build AOSP\"\"\" env_node =", "\" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"',", "for x in env_node] else: env_values = [] env = \" \".join(env_values) variables", "__init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf =", "conf self.name = name self.generator = generator self.src_stamps = src_stamps self.build_dir = build_dir", "return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should", "should capture Android Kernel state for a reproducible builds. Luckily, there is nothing", "Apache-2.0 # Copyright 2021 EPAM Systems \"\"\" Android kernel builder module \"\"\" import", "name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class", "from typing import List from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def", "[] env = \" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env, }", "= [x.as_str for x in env_node] else: env_values = [] env = \"", "\"\"\" This method should capture Android Kernel state for a reproducible builds. 
Luckily,", "configuration \"\"\" def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator:", "str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf,", "there is nothing to do, as Android state is controlled solely by its", "= \" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c", "script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for given", "generator: ninja_syntax.Writer): self.conf = conf self.name = name self.generator = generator self.src_stamps =", "get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured", "def get_targets(self): \"Return list of targets that are generated by this build\" return", "is nothing to do, as Android state is controlled solely by its repo", "os.path from typing import List from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax", "\" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets()", "variables = { \"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\",", "from moulin import ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],", "YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name", "} targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self):", "ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\"", "targets = 
self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return", "= generator self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules", "of targets that are generated by this build\" return [os.path.join(self.build_dir, t.as_str) for t", "state is controlled solely by its repo state. And repo state is captured", "SPDX-License-Identifier: Apache-2.0 # Copyright 2021 EPAM Systems \"\"\" Android kernel builder module \"\"\"", "Systems \"\"\" Android kernel builder module \"\"\" import os.path from typing import List", "that are generated by this build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]]", "command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel: \"\"\"", "\"\"\" cmd = \" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\",", "env = \" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env, } targets", "def capture_state(self): \"\"\" This method should capture Android Kernel state for a reproducible", "src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name = name self.generator = generator", "This method should capture Android Kernel state for a reproducible builds. Luckily, there", "this build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This", "to do, as Android state is controlled solely by its repo state. And", "controlled solely by its repo state. 
And repo state is captured by repo", "generated by this build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self):", "\"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel", "yocto build rules for ninja \"\"\" cmd = \" && \".join([ \"export $env\",", "conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf", "by this build\" return [os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\"", "env_node: env_values = [x.as_str for x in env_node] else: env_values = [] env", "return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build", "build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja", "ninja rules to build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values =", "t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture Android", "\"\"\" def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str], generator: ninja_syntax.Writer):", "\"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\",", "self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list of targets", "env_values = [x.as_str for x in env_node] else: env_values = [] env =", "get_targets(self): \"Return list of targets that are generated by this build\" return [os.path.join(self.build_dir,", "generates Ninja rules for given Android build 
configuration \"\"\" def __init__(self, conf: YamlValue,", "env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def", "capture Android Kernel state for a reproducible builds. Luckily, there is nothing to", "build AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for x", "str, src_stamps: List[str], generator: ninja_syntax.Writer): self.conf = conf self.name = name self.generator =", "Ninja rules for given Android build configuration \"\"\" def __init__(self, conf: YamlValue, name:", "AOSP\"\"\" env_node = self.conf.get(\"env\", None) if env_node: env_values = [x.as_str for x in", "= { \"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps,", "ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator)", "self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture Android Kernel state for a", "[os.path.join(self.build_dir, t.as_str) for t in self.conf[\"target_images\"]] def capture_state(self): \"\"\" This method should capture", "build configuration \"\"\" def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],", "name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for", "= \" \".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env, } targets =", "generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline() class AndroidKernel:", "list of targets that are generated by this build\" return [os.path.join(self.build_dir, t.as_str) for", "reproducible builds. 
Luckily, there is nothing to do, as Android state is controlled", "moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def get_builder(conf: YamlValue, name: str, build_dir:", "= name self.generator = generator self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self):", "self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules to build", "Android state is controlled solely by its repo state. And repo state is", "List[str], generator: ninja_syntax.Writer): self.conf = conf self.name = name self.generator = generator self.src_stamps", "src_stamps: List[str], generator: ninja_syntax.Writer): \"\"\" Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name,", "generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto build rules for ninja \"\"\" cmd", "\"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\") generator.newline()", "None) if env_node: env_values = [x.as_str for x in env_node] else: env_values =", "Android Kernel state for a reproducible builds. 
Luckily, there is nothing to do,", "import os.path from typing import List from moulin.yaml_wrapper import YamlValue from moulin import", "self.conf = conf self.name = name self.generator = generator self.src_stamps = src_stamps self.build_dir", "cmd = \" && \".join([ \"export $env\", \"cd $build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", command=f'bash", "Android kernel builder module \"\"\" import os.path from typing import List from moulin.yaml_wrapper", "import List from moulin.yaml_wrapper import YamlValue from moulin import ninja_syntax def get_builder(conf: YamlValue,", "\"Return list of targets that are generated by this build\" return [os.path.join(self.build_dir, t.as_str)", "ninja_syntax.Writer): self.conf = conf self.name = name self.generator = generator self.src_stamps = src_stamps", "\"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator: ninja_syntax.Writer): \"\"\" Generate yocto", "= self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline() return targets def get_targets(self): \"Return list", "self.generator.newline() return targets def get_targets(self): \"Return list of targets that are generated by", "\".join(env_values) variables = { \"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets,", "\"build_dir\": self.build_dir, \"env\": env, } targets = self.get_targets() self.generator.build(targets, \"android_kernel_build\", self.src_stamps, variables=variables) self.generator.newline()", "Return configured AndroidKernel class \"\"\" return AndroidKernel(conf, name, build_dir, src_stamps, generator) def gen_build_rules(generator:", "self.generator = generator self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja", "$build_dir\", \"build/build.sh\", ]) generator.rule(\"android_kernel_build\", 
command=f'bash -c \"{cmd}\"', description=\"Invoke Android Kernel build script\", pool=\"console\")", "AndroidBuilder class generates Ninja rules for given Android build configuration \"\"\" def __init__(self,", "generator self.src_stamps = src_stamps self.build_dir = build_dir def gen_build(self): \"\"\"Generate ninja rules to", "pool=\"console\") generator.newline() class AndroidKernel: \"\"\" AndroidBuilder class generates Ninja rules for given Android" ]
[ "KIND, either express or implied. # See the License for the specific language", "the License. # import logging from datetime import datetime from django.conf import settings", "Unless required by applicable law or agreed to in writing, software # distributed", "!= order: screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform,", "'#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'},", ": 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body =", "website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as deleted instead of actually", "apps and the web.\\r\\n\\u2022 Relevant notifications: Know when people you invited post new", "} page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body = body page.save()", "platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete = [s for s", "and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic", "with the people you choose, away from social media. Make your own groups", "_short_description(long_description): if not long_description: return long_description return '%s...' 
% long_description[:180] def example_from_itunes_id(itunes_id, country):", "def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname", "get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately", "page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body = body page.save() elif", "New Moms! Share photos of a new baby with close friends and family", "through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know when people you", "in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i in existing_screenshots} for", "'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body = body", "screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website def", "is set but incorrect' return True, None def _short_description(long_description): if not long_description: return", "'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots':", "[s for s in existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot in", "for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if", "{'url': info.icon_512}, } } return example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain':", "this file except in compliance with the License. 
# You may obtain a", "screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website def get_fancy_cluster_example(): return", "TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number of rows", "country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id':", "'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'},", "'596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato',", "+ 1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order:", "Moms! 
Share photos of a new baby with close friends and family without", "set([i.id for i in screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots if", "in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for", "& secure: Only invited members of the group can see what you post.\\r\\n\\u2022", "screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website,", "gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know when people you invited", "ANY KIND, either express or implied. # See the License for the specific", "<gh_stars>1000+ # encoding: utf-8 # # Copyright 2016 Cluster Labs, Inc. # #", "people you invited post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032',", "'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone':", "'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, 
{'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, }", "import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message", "order = i + 1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if", "= body page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif", "datetime import datetime from django.conf import settings from django.db import transaction from backend.lk.logic", "from django.conf import settings from django.db import transaction from backend.lk.logic import appstore_fetch from", "'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special moments with friends and family',", "invited post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android',", "info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url':", "'icon': {'url': info.icon_512}, } } return example_website def get_fancy_cluster_example(): return { 'id': 'example',", "to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms',", "if screenshot.order != order: screenshot.order = order screenshot.save() else: image.increment_ref_count() 
screenshot = AppWebsiteScreenshot(website=website,", "incorrect' return True, None def _short_description(long_description): if not long_description: return long_description return '%s...'", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots =", "encoding: utf-8 # # Copyright 2016 Cluster Labs, Inc. # # Licensed under", "utf-8 # # Copyright 2016 Cluster Labs, Inc. # # Licensed under the", "why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of the group can see what", "when people you invited post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId':", "text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription':", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "post.\\r\\n\\u2022 An app for everyone: Access Cluster through gorgeous mobile apps and the", "# import logging from datetime import datetime from django.conf import settings from django.db", "import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import", "not long_description: return long_description return '%s...' 
% long_description[:180] def example_from_itunes_id(itunes_id, country): info =", "list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None", "def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline':", "appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name,", "and body: page.body = body page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug,", "people you choose, away from social media. Make your own groups and share", "OF ANY KIND, either express or implied. # See the License for the", "the group can see what you post.\\r\\n\\u2022 An app for everyone: Access Cluster", "about. Create a group with family, a group of friends, coworkers, people from", "backend.util import dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain)", "= dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME:", "website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background:", "and memories with the people you choose, away from social media. 
Make your", "gives you a private space to share photos and memories with the people", "image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body):", "existing_by_image_id = {i.image_id: i for i in existing_screenshots} for i, image in enumerate(screenshot_images):", "return { 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share", "'Privately share special moments with friends and family', 'shortDescription': 'Cluster gives you a", "error_message if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is", "with friends not appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re", "= {i.image_id: i for i in existing_screenshots} for i, image in enumerate(screenshot_images): order", "website.logo = None if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as", "from social media. Make your own groups and share pics, videos, comments, and", "dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname != '%s.' 
% settings.HOSTED_WEBSITE_CNAME: return", "elif page and not body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for", "instead of actually deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain =", "'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special moments with", "datetime from django.conf import settings from django.db import transaction from backend.lk.logic import appstore_fetch", "django.conf import settings from django.db import transaction from backend.lk.logic import appstore_fetch from backend.lk.models", "Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re not in the same place.\\r\\n\\r\\nTons", "town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share photos of a new baby", "'tagline': 'Privately share special moments with friends and family', 'shortDescription': 'Cluster gives you", "website.background = None # TODO(Taylor): Mark as deleted instead of actually deleting potentially", "'longDescription': u'Cluster makes it possible to create private groups where you share moments", "if page and body: page.body = body page.save() elif not page and body:", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "app for everyone: Access Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant", "Make your own groups and share pics, videos, comments, and chat!', 'longDescription': u'Cluster", "the same place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure:", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "can see what you post.\\r\\n\\u2022 An app for everyone: Access Cluster through gorgeous", "trust Cluster. 
Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of the group", "permissions and # limitations under the License. # import logging from datetime import", "existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order screenshot.save() else:", "cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set but", "for i in existing_screenshots} for i, image in enumerate(screenshot_images): order = i +", "import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import dnsutil from backend.util import", "Relevant notifications: Know when people you invited post new things to the group.',", "import settings from django.db import transaction from backend.lk.logic import appstore_fetch from backend.lk.models import", "members of the group can see what you post.\\r\\n\\u2022 An app for everyone:", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website def get_fancy_cluster_example():", "= AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body = body page.save() elif not", "existing_screenshots} for i, image in enumerate(screenshot_images): order = i + 1 if image.id", "!= '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set but incorrect'", "import datetime from django.conf import settings from django.db import transaction from backend.lk.logic import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "share special moments with friends and family', 'shortDescription': 'Cluster gives you a private", "else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! 
Share photos of a new baby with close friends", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all())", "Only invited members of the group can see what you post.\\r\\n\\u2022 An app", "} } return example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template':", "{'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } },", "AppWebsitePage from backend.util import dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname, error_message", "= order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic", "Share photos of a new baby with close friends and family without spamming", "{ 'terms' : 'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support',", "of actually deleting potentially huge number of rows # 
AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain = None", "required by applicable law or agreed to in writing, software # distributed under", "django.db import transaction from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models", "order: screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order)", "screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles", "applicable law or agreed to in writing, software # distributed under the License", "your own groups and share pics, videos, comments, and chat!', 'longDescription': u'Cluster makes", "in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count()", "= appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName':", "photos of a new baby with close friends and family without spamming everyone", "and videos with the people you care about. 
Create a group with family,", "secure: Only invited members of the group can see what you post.\\r\\n\\u2022 An", "None if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background = None", "web.\\r\\n\\u2022 Relevant notifications: Know when people you invited post new things to the", "list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete = [s for", "it possible to create private groups where you share moments through photos and", "page and not body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot", "or agreed to in writing, software # distributed under the License is distributed", "family', 'shortDescription': 'Cluster gives you a private space to share photos and memories", "country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name, 'tagline':", "close friends and family without spamming everyone on other social networks\\r\\n\\u2022 College Students!", "screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete = [s for s in", "website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor):", "'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support', } page =", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "makes it possible to create private groups where you share moments through photos", "# encoding: utf-8 # # Copyright 2016 Cluster Labs, Inc. 
# # Licensed", "chat!', 'longDescription': u'Cluster makes it possible to create private groups where you share", "@transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for", "return example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName':", "'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body", "[{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website", "'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512},", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "see what you post.\\r\\n\\u2022 An app for everyone: Access Cluster through gorgeous mobile", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body:", "backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util", "of friends, coworkers, people from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New", "transaction from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage", "License. # You may obtain a copy of the License at # #", "home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! 
Share photos of a new", "body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count()", "for i in screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots if s.image_id", "in touch even if you\\u2019re not in the same place.\\r\\n\\r\\nTons of people already", "% settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set but incorrect' return True,", "in screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots if s.image_id not in", "compliance with the License. # You may obtain a copy of the License", "import dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if", "same place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only", "settings from django.db import transaction from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot", "update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in", "Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "memories with the people you choose, away from social media. Make your own", "Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of the group can see", "enumerate(screenshot_images): order = i + 1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id]", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "value is set but incorrect' return True, None def _short_description(long_description): if not long_description:", "with the people you care about. 
Create a group with family, a group", "friends and family without spamming everyone on other social networks\\r\\n\\u2022 College Students! Share", "from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from", "s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id:", "logging from datetime import datetime from django.conf import settings from django.db import transaction", "to create private groups where you share moments through photos and videos with", "not use this file except in compliance with the License. # You may", "{ 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, }", "your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share photos of a", "Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first()", "'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images':", "@transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' : 'Terms and Conditions',", "AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and body: page.body = body page.save() elif not page", "existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete =", "huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain = None website.delete_time = datetime.now() website.save()", "License, Version 2.0 (the \"License\"); # you 
may not use this file except", "a new baby with close friends and family without spamming everyone on other", "info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website def get_fancy_cluster_example(): return { 'id':", "{'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ],", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'},", "{'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, 
{'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform):", "screenshots_to_delete = [s for s in existing_screenshots if s.image_id not in screenshot_image_ids] for", "def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' : 'Terms and Conditions', 'privacy'", "= { 'terms' : 'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support' :", "hosted_page_titles = { 'terms' : 'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support'", "'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'},", "not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body:", "screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo =", "= list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon =", "# you may not use this file except in compliance 
with the License.", "app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name, 'tagline': app_tagline,", "in existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete()", "in existing_screenshots} for i, image in enumerate(screenshot_images): order = i + 1 if", "agreed to in writing, software # distributed under the License is distributed on", "}, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids =", "'terms' : 'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support', }", "platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' :", "page.body = body page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug])", "you a private space to share photos and memories with the people you", "website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as deleted instead of actually deleting", "(the \"License\"); # you may not use this file except in compliance with", "AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic def delete_website(website):", "'Cluster', 'tagline': 'Privately share special moments with friends and family', 'shortDescription': 'Cluster gives", "everyone: Access Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know", "special moments with friends and family', 'shortDescription': 'Cluster gives you a private space", "# Unless required by applicable 
law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug,", "= existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot", "the people you care about. Create a group with family, a group of", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "backend.util import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False,", "invited members of the group can see what you post.\\r\\n\\u2022 An app for", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "page and body: page.body = body page.save() elif not page and body: AppWebsitePage.objects.create(website=website,", "{i.image_id: i for i in existing_screenshots} for i, image in enumerate(screenshot_images): order =", "% long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name)", "= None if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background =", "file except in compliance with the License. # You may obtain a copy", "people you care about. 
Create a group with family, a group of friends,", "potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain = None website.delete_time = datetime.now()", "share pics, videos, comments, and chat!', 'longDescription': u'Cluster makes it possible to create", "deleted instead of actually deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain", "body): hosted_page_titles = { 'terms' : 'Terms and Conditions', 'privacy' : 'Privacy Policy',", "License for the specific language governing permissions and # limitations under the License.", "moments through photos and videos with the people you care about. Create a", "page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete()", "False, 'The CNAME value is set but incorrect' return True, None def _short_description(long_description):", "# TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number of", "for everyone: Access Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications:", "'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon':", "{'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } 
@transaction.atomic def update_website_screenshots(website,", "to in writing, software # distributed under the License is distributed on an", "from backend.util import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return", "in the same place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private &", "'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } }", "'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo':", "for Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re not in the same", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "with friends and family', 'shortDescription': 'Cluster gives you a private space to share", "'privacy' : 'Privacy Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import dnsutil from backend.util import text", "'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url':", "info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot", "page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete()", "'shortDescription': 
_short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in", "you care about. Create a group with family, a group of friends, coworkers,", "everyone on other social networks\\r\\n\\u2022 College Students! Share memories with friends not appropriate", "of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members", "College Students! Share memories with friends not appropriate for Facebook\\r\\n\\u2022 Families! Keep in", "s in existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count()", "backend.lk.models import AppWebsitePage from backend.util import dnsutil from backend.util import text def check_domain_for_cname_record(domain):", "'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'},", "return False, 'The CNAME value is set but incorrect' return True, None def", "example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = {", "or implied. 
# See the License for the specific language governing permissions and", "info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example',", "image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms'", "screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in screenshot_images])", "def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i", "body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic def delete_website(website): screenshots =", "for i, image in enumerate(screenshot_images): order = i + 1 if image.id in", "website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count()", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return example_website def get_fancy_cluster_example(): return {", "if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as deleted instead of", "from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! 
Share photos of", "'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon':", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "{ 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special", "i in screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots if s.image_id not", "in writing, software # distributed under the License is distributed on an \"AS", "screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' : 'Terms and", "share moments through photos and videos with the people you care about. Create", "screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo:", "'shortDescription': 'Cluster gives you a private space to share photos and memories with", "if error_message: return False, error_message if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "limitations under the License. # import logging from datetime import datetime from django.conf", "Cluster. 
Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of the group can", "i in existing_screenshots} for i, image in enumerate(screenshot_images): order = i + 1", "{ 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id,", "photos and videos with the people you care about. Create a group with", "long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website", "Create a group with family, a group of friends, coworkers, people from your", "import AppWebsitePage from backend.util import dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname,", "anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share photos of a new baby with close", "the people you choose, away from social media. Make your own groups and", "} @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id", "through photos and videos with the people you care about. Create a group", "platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline = text.app_name_tagline(info.name) example_website =", "private space to share photos and memories with the people you choose, away", "social media. 
Make your own groups and share pics, videos, comments, and chat!',", "you may not use this file except in compliance with the License. #", "possible to create private groups where you share moments through photos and videos", "under the License. # import logging from datetime import datetime from django.conf import", "what you post.\\r\\n\\u2022 An app for everyone: Access Cluster through gorgeous mobile apps", "'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font':", "moments with friends and family', 'shortDescription': 'Cluster gives you a private space to", "media. Make your own groups and share pics, videos, comments, and chat!', 'longDescription':", "True, None def _short_description(long_description): if not long_description: return long_description return '%s...' % long_description[:180]", "An app for everyone: Access Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022", "screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i", "'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special moments with friends", "'The CNAME value is set but incorrect' return True, None def _short_description(long_description): if", "'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, 
{'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], }", "if not long_description: return long_description return '%s...' % long_description[:180] def example_from_itunes_id(itunes_id, country): info", "group with family, a group of friends, coworkers, people from your home town,", "use this file except in compliance with the License. # You may obtain", "= set([i.id for i in screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots", "and chat!', 'longDescription': u'Cluster makes it possible to create private groups where you", "dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message:", "spamming everyone on other social networks\\r\\n\\u2022 College Students! Share memories with friends not", "'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def", "return '%s...' 
% long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline", "'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special moments with friends and", "group of friends, coworkers, people from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022", "2016 Cluster Labs, Inc. # # Licensed under the Apache License, Version 2.0", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "new baby with close friends and family without spamming everyone on other social", "things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink':", "appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re not in the", "screenshot.order != order: screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image,", "screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order screenshot.save() else: image.increment_ref_count()", "of a new baby with close friends and family without spamming everyone on", "choose, away from social media. Make your own groups and share pics, videos,", "if you\\u2019re not in the same place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s", "= i + 1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order", "groups and share pics, videos, comments, and chat!', 'longDescription': u'Cluster makes it possible", "family without spamming everyone on other social networks\\r\\n\\u2022 College Students! 
Share memories with", "2.0 (the \"License\"); # you may not use this file except in compliance", "language governing permissions and # limitations under the License. # import logging from", "share photos and memories with the people you choose, away from social media.", "create private groups where you share moments through photos and videos with the", "where you share moments through photos and videos with the people you care", "slug=slug).first() if page and body: page.body = body page.save() elif not page and", "for the specific language governing permissions and # limitations under the License. #", "if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background = None #", "= [s for s in existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "and # limitations under the License. # import logging from datetime import datetime", "not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i", "you share moments through photos and videos with the people you care about.", "'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots':", "for:\\r\\n\\u2022 New Moms! Share photos of a new baby with close friends and", "elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not", "None # TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number", "return False, error_message if cname != '%s.' 
% settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME", "# # Unless required by applicable law or agreed to in writing, software", "# Copyright 2016 Cluster Labs, Inc. # # Licensed under the Apache License,", "'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster', 'tagline': 'Privately share special moments", "express or implied. # See the License for the specific language governing permissions", "you choose, away from social media. Make your own groups and share pics,", "people from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share photos", "long_description return '%s...' % long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name,", "Students! Share memories with friends not appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch", "= text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description,", "'Privacy Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page and", "family, a group of friends, coworkers, people from your home town, or anyone", "of the group can see what you post.\\r\\n\\u2022 An app for everyone: Access", "'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor':", "{'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, 
{'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic", "'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': {", "in enumerate(screenshot_images): order = i + 1 if image.id in existing_by_image_id: screenshot =", "either express or implied. # See the License for the specific language governing", "'', 'appName': 'Cluster', 'tagline': 'Privately share special moments with friends and family', 'shortDescription':", "you post.\\r\\n\\u2022 An app for everyone: Access Cluster through gorgeous mobile apps and", "[{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images,", "for s in existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete:", "'%s.' 
% settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set but incorrect' return", "if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo = None if", "governing permissions and # limitations under the License. # import logging from datetime", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for", ": 'Terms and Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support', } page", "photos and memories with the people you choose, away from social media. Make", "None if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as deleted instead", "= None # TODO(Taylor): Mark as deleted instead of actually deleting potentially huge", "return True, None def _short_description(long_description): if not long_description: return long_description return '%s...' 
%", "def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon:", "friends, coworkers, people from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms!", "a group of friends, coworkers, people from your home town, or anyone else!\\r\\n\\r\\nGreat", "{'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots", "the License. # You may obtain a copy of the License at #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "to share photos and memories with the people you choose, away from social", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "other social networks\\r\\n\\u2022 College Students! Share memories with friends not appropriate for Facebook\\r\\n\\u2022", "from backend.util import dnsutil from backend.util import text def check_domain_for_cname_record(domain): cname, error_message =", "for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i in", "long_description: return long_description return '%s...' 
% long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id,", "app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone':", "coworkers, people from your home town, or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share", "image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order", "deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain = None website.delete_time =", "if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set", "the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink':", "screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i in existing_screenshots}", "_short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]},", "with close friends and family without spamming everyone on other social networks\\r\\n\\u2022 College", "order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def", "License. 
# import logging from datetime import datetime from django.conf import settings from", "new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help',", "if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order =", "example_website = { 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description),", "or anyone else!\\r\\n\\r\\nGreat for:\\r\\n\\u2022 New Moms! Share photos of a new baby with", "and family without spamming everyone on other social networks\\r\\n\\u2022 College Students! Share memories", "with the License. # You may obtain a copy of the License at", "app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': { 'screenshots': {'iPhone': [{'url': screenshot}", "Private & secure: Only invited members of the group can see what you", "baby with close friends and family without spamming everyone on other social networks\\r\\n\\u2022", "not body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots:", "# limitations under the License. 
# import logging from datetime import datetime from", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "but incorrect' return True, None def _short_description(long_description): if not long_description: return long_description return", "'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images':", "body: page.body = body page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body,", "in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order screenshot.save()", "already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of the", "Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know when people", "from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import dnsutil from", "AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = {", "care about. 
Create a group with family, a group of friends, coworkers, people", "slug, body): hosted_page_titles = { 'terms' : 'Terms and Conditions', 'privacy' : 'Privacy", "with family, a group of friends, coworkers, people from your home town, or", "screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i in existing_screenshots} for i,", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "from django.db import transaction from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "groups where you share moments through photos and videos with the people you", "create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' : 'Terms and Conditions', 'privacy' :", "} }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) screenshot_image_ids", "'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background':", "without spamming everyone on other social networks\\r\\n\\u2022 College Students! Share memories with friends", "# # Copyright 2016 Cluster Labs, Inc. # # Licensed under the Apache", "memories with friends not appropriate for Facebook\\r\\n\\u2022 Families! 
Keep in touch even if", "'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId': info.itunes_id, 'images': {", "from backend.lk.models import AppWebsitePage from backend.util import dnsutil from backend.util import text def", "and the web.\\r\\n\\u2022 Relevant notifications: Know when people you invited post new things", "error_message: return False, error_message if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The", "'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2',", "i, image in enumerate(screenshot_images): order = i + 1 if image.id in existing_by_image_id:", "image in enumerate(screenshot_images): order = i + 1 if image.id in existing_by_image_id: screenshot", "screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save()", "in compliance with the License. # You may obtain a copy of the", "import transaction from backend.lk.logic import appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import", "Access Cluster through gorgeous mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know when", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "body page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "None def _short_description(long_description): if not long_description: return long_description return '%s...' % long_description[:180] def", "info.icon_512}, } } return example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co',", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark", "{'iPhone': [{'url': screenshot} for screenshot in info.screenshots]}, 'icon': {'url': info.icon_512}, } } return", "screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo", "], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order'))", "from datetime import datetime from django.conf import settings from django.db import transaction from", "mobile apps and the web.\\r\\n\\u2022 Relevant notifications: Know when people you invited post", "friends and family', 'shortDescription': 'Cluster gives you a private space to share photos", "networks\\r\\n\\u2022 College Students! Share memories with friends not appropriate for Facebook\\r\\n\\u2022 Families! 
Keep", "a group with family, a group of friends, coworkers, people from your home", "See the License for the specific language governing permissions and # limitations under", "group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy',", "you invited post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId':", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "even if you\\u2019re not in the same place.\\r\\n\\r\\nTons of people already trust Cluster.", "and not body: page.delete() @transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in", "a private space to share photos and memories with the people you choose,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "{ 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, 
{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'},", "i + 1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order !=", "check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname !=", "u'Cluster makes it possible to create private groups where you share moments through", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "touch even if you\\u2019re not in the same place.\\r\\n\\r\\nTons of people already trust", "'com.getcluster.android', 'supportLink': 'http://cluster.co/help', 'termsLink': 'http://cluster.co/terms', 'privacyLink': 'http://cluster.co/privacy', 'primaryColor': '#0092F2', 'font': 'Lato', 'frameScreenshots': 'white',", "CNAME value is set but incorrect' return True, None def _short_description(long_description): if not", "'font': 'Lato', 'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots':", "screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon", "screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id = {i.image_id: i for i in existing_screenshots} for i, image", "and family', 'shortDescription': 'Cluster gives you a private space to share photos and", "videos, comments, and chat!', 'longDescription': u'Cluster makes it possible to create private groups", 
"on other social networks\\r\\n\\u2022 College Students! Share memories with friends not appropriate for", "screenshot.delete() if website.icon: website.icon.decrement_ref_count() website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo = None", "Share memories with friends not appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch even", "not appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re not in", "delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if website.icon: website.icon.decrement_ref_count()", ": 'Privacy Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website, slug=slug).first() if page", "actually deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete() website.domain = None website.delete_time", "the specific language governing permissions and # limitations under the License. # import", "and share pics, videos, comments, and chat!', 'longDescription': u'Cluster makes it possible to", "1 if image.id in existing_by_image_id: screenshot = existing_by_image_id[image.id] if screenshot.order != order: screenshot.order", "and Conditions', 'privacy' : 'Privacy Policy', 'support' : 'Support', } page = AppWebsitePage.objects.filter(website=website,", "cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname != '%s.'", "Families! 
Keep in touch even if you\\u2019re not in the same place.\\r\\n\\r\\nTons of", "existing_screenshots if s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id", "pics, videos, comments, and chat!', 'longDescription': u'Cluster makes it possible to create private", "def _short_description(long_description): if not long_description: return long_description return '%s...' % long_description[:180] def example_from_itunes_id(itunes_id,", "import logging from datetime import datetime from django.conf import settings from django.db import", "slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic def delete_website(website): screenshots", "text def check_domain_for_cname_record(domain): cname, error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if", "'appName': 'Cluster', 'tagline': 'Privately share special moments with friends and family', 'shortDescription': 'Cluster", "Version 2.0 (the \"License\"); # you may not use this file except in", "except in compliance with the License. 
# You may obtain a copy of", "{'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'}, ], } }, } @transaction.atomic def update_website_screenshots(website, screenshot_images, platform): existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id,", "'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'}, {'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'}, {'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'}, {'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'}, {'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'},", "people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited members of", "False, error_message if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value", "return long_description return '%s...' 
% long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country)", "= { 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription': info.description, 'shortDescription': _short_description(info.description), 'itunesId':", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "'frameScreenshots': 'white', 'images': { 'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'}, 'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'}, 'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'}, 'screenshots': {'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'},", "backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import dnsutil from backend.util", "Cluster Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the", "if s.image_id not in screenshot_image_ids] for screenshot in screenshots_to_delete: screenshot.image.decrement_ref_count() screenshot.delete() existing_by_image_id =", "example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template': '', 'appName': 'Cluster',", "you\\u2019re not in the same place.\\r\\n\\r\\nTons of people already trust Cluster. 
Here\\u2019s why:\\r\\n\\r\\n\\u2022", "i for i in existing_screenshots} for i, image in enumerate(screenshot_images): order = i", "body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and not body: page.delete() @transaction.atomic def", "order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles = { 'terms' : 'Terms", "friends not appropriate for Facebook\\r\\n\\u2022 Families! Keep in touch even if you\\u2019re not", "app_tagline = text.app_name_tagline(info.name) example_website = { 'id': 'example', 'appName': app_name, 'tagline': app_tagline, 'longDescription':", "Mark as deleted instead of actually deleting potentially huge number of rows #", "own groups and share pics, videos, comments, and chat!', 'longDescription': u'Cluster makes it", "private groups where you share moments through photos and videos with the people", "settings.HOSTED_WEBSITE_CNAME: return False, 'The CNAME value is set but incorrect' return True, None", "Copyright 2016 Cluster Labs, Inc. # # Licensed under the Apache License, Version", "'%s...' % long_description[:180] def example_from_itunes_id(itunes_id, country): info = appstore_fetch.app_info_with_id(itunes_id, country) app_name, app_tagline =", "away from social media. Make your own groups and share pics, videos, comments,", "place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private & secure: Only invited", "videos with the people you care about. 
Create a group with family, a", "existing_by_image_id[image.id] if screenshot.order != order: screenshot.order = order screenshot.save() else: image.increment_ref_count() screenshot =", "screenshot.delete() existing_by_image_id = {i.image_id: i for i in existing_screenshots} for i, image in", "Keep in touch even if you\\u2019re not in the same place.\\r\\n\\r\\nTons of people", "specific language governing permissions and # limitations under the License. # import logging", "social networks\\r\\n\\u2022 College Students! Share memories with friends not appropriate for Facebook\\r\\n\\u2022 Families!", "not in the same place.\\r\\n\\r\\nTons of people already trust Cluster. Here\\u2019s why:\\r\\n\\r\\n\\u2022 Private", "= AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order) screenshot.save() @transaction.atomic def create_or_update_hosted_page(website, slug, body): hosted_page_titles =", "website.icon = None if website.logo: website.logo.decrement_ref_count() website.logo = None if website.background: website.background.decrement_ref_count() website.background", "space to share photos and memories with the people you choose, away from", "@transaction.atomic def delete_website(website): screenshots = list(website.screenshots.all()) for screenshot in screenshots: screenshot.image.decrement_ref_count() screenshot.delete() if", "appstore_fetch from backend.lk.models import AppWebsiteScreenshot from backend.lk.models import AppWebsitePage from backend.util import dnsutil", "as deleted instead of actually deleting potentially huge number of rows # AppWebsiteView.objects.filter(website_id=website.id).delete()", "= None if website.background: website.background.decrement_ref_count() website.background = None # TODO(Taylor): Mark as deleted", "the web.\\r\\n\\u2022 Relevant notifications: Know when people you invited post new things to", "= list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order')) 
screenshot_image_ids = set([i.id for i in screenshot_images]) screenshots_to_delete = [s", "page.save() elif not page and body: AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug]) elif page and", "set but incorrect' return True, None def _short_description(long_description): if not long_description: return long_description", "error_message = dnsutil.get_cname_for_domain(domain) if error_message: return False, error_message if cname != '%s.' %", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "} return example_website def get_fancy_cluster_example(): return { 'id': 'example', 'domain': 'cluster.co', 'template': '',", "'Cluster gives you a private space to share photos and memories with the", "comments, and chat!', 'longDescription': u'Cluster makes it possible to create private groups where", "notifications: Know when people you invited post new things to the group.', 'keywords':", "group can see what you post.\\r\\n\\u2022 An app for everyone: Access Cluster through", "post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby', 'itunesId': '596595032', 'playStoreId': 'com.getcluster.android', 'supportLink':", "Know when people you invited post new things to the group.', 'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby',", "screenshot_images]) screenshots_to_delete = [s for s in existing_screenshots if s.image_id not in screenshot_image_ids]" ]
[ "config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if not", "def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed", "`yes` for the channel or server.\") if not message.channel.is_private: return await f(message, *args,", "scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\",", "private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short", "import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\",", "might be used by plugins.\"\"\" from functools import wraps from plumeria import config", "raise CommandError( \"Games aren't allowed here! Enable games by setting the `common/allow_games` config", "plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games", "type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\",", "and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def", "game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow", "message.channel): raise CommandError( \"Games aren't allowed here! 
Enable games by setting the `common/allow_games`", "of common configuration options that might be used by plugins.\"\"\" from functools import", "type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date and time format\", scoped=True,", "\"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date and time format\",", "scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here! Enable games by setting the", "configuration options that might be used by plugins.\"\"\" from functools import wraps from", "config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format =", "**kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here! Enable games", "setting to `yes` for the channel or server.\") if not message.channel.is_private: return await", "*args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here! 
Enable", "functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p", "the `common/allow_games` config setting to `yes` for the channel or server.\") if not", "fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date and time format\", scoped=True, private=False)", "%p %Z\", comment=\"Short date and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def", "from functools import wraps from plumeria import config from plumeria.command import CommandError from", "= config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date and", "time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message,", "aren't allowed here! Enable games by setting the `common/allow_games` config setting to `yes`", "config setting to `yes` for the channel or server.\") if not message.channel.is_private: return", "to `yes` for the channel or server.\") if not message.channel.is_private: return await f(message,", "NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M", "nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False)", "from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow", "\"\"\"A list of common configuration options that might be used by plugins.\"\"\" from", "plumeria import config from plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr from", "setting the `common/allow_games` 
config setting to `yes` for the channel or server.\") if", "\"Games aren't allowed here! Enable games by setting the `common/allow_games` config setting to", "wraps from plumeria import config from plumeria.command import CommandError from plumeria.config.types import boolstr,", "import CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games =", "%Z\", comment=\"Short date and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f):", "games by setting the `common/allow_games` config setting to `yes` for the channel or", "to allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether", "from plumeria import config from plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr", "from plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config", "config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games,", "channel or server.\") if not message.channel.is_private: return await f(message, *args, **kwargs) return wrapper", "def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise", "date and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async", "by setting the `common/allow_games` config setting to `yes` for the channel or server.\")", "common configuration options that might be used by plugins.\"\"\" from functools import wraps", "by plugins.\"\"\" from functools import wraps from plumeria 
import config from plumeria.command import", "scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True,", "comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False,", "that might be used by plugins.\"\"\" from functools import wraps from plumeria import", "from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\",", "config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw =", "functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW", "= config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw", "be used by plugins.\"\"\" from functools import wraps from plumeria import config from", "CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\",", "plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr,", "%I:%M %p %Z\", comment=\"Short date and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format)", "import boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False,", "comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", 
type=dateformatstr, fallback=\"%b", "scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\",", "format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args,", "here! Enable games by setting the `common/allow_games` config setting to `yes` for the", "= config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format", "%m, %Y %I:%M %p %Z\", comment=\"Short date and time format\", scoped=True, private=False) config.add(allow_games)", "games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError(", "CommandError( \"Games aren't allowed here! Enable games by setting the `common/allow_games` config setting", "allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False)", "import wraps from plumeria import config from plumeria.command import CommandError from plumeria.config.types import", "if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here! 
Enable games by", "`common/allow_games` config setting to `yes` for the channel or server.\") if not message.channel.is_private:", "functools import wraps from plumeria import config from plumeria.command import CommandError from plumeria.config.types", "fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr,", "used by plugins.\"\"\" from functools import wraps from plumeria import config from plumeria.command", "config from plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config import", "scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs):", "not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here! Enable games by setting", "\"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\",", "fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr,", "config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date and time", "config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel):", "plugins.\"\"\" from functools import wraps from plumeria import config from plumeria.command import CommandError", "to allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m,", "@wraps(f) async def wrapper(message, *args, **kwargs): if not 
scoped_config.get(allow_games, message.channel): raise CommandError( \"Games", "plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to allow game", "for the channel or server.\") if not message.channel.is_private: return await f(message, *args, **kwargs)", "import config from plumeria.command import CommandError from plumeria.config.types import boolstr, dateformatstr from plumeria.core.scoped_config", "the channel or server.\") if not message.channel.is_private: return await f(message, *args, **kwargs) return", "<filename>plumeria/config/common.py<gh_stars>10-100 \"\"\"A list of common configuration options that might be used by plugins.\"\"\"", "async def wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't", "wrapper(message, *args, **kwargs): if not scoped_config.get(allow_games, message.channel): raise CommandError( \"Games aren't allowed here!", "boolstr, dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether", "Enable games by setting the `common/allow_games` config setting to `yes` for the channel", "comment=\"Short date and time format\", scoped=True, private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f)", "private=False) config.add(allow_games) config.add(nsfw) config.add(short_date_time_format) def games_allowed_only(f): @wraps(f) async def wrapper(message, *args, **kwargs): if", "\"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\",", "options that might be used by plugins.\"\"\" from functools import wraps from plumeria", "%Y %I:%M %p %Z\", comment=\"Short date and time format\", 
scoped=True, private=False) config.add(allow_games) config.add(nsfw)", "short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y %I:%M %p %Z\", comment=\"Short date", "list of common configuration options that might be used by plugins.\"\"\" from functools", "dateformatstr from plumeria.core.scoped_config import scoped_config allow_games = config.create(\"common\", \"allow_games\", type=boolstr, fallback=False, comment=\"Whether to", "allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to", "allow NSFW functions\", scoped=True, private=False) short_date_time_format = config.create(\"common\", \"date_time_short\", type=dateformatstr, fallback=\"%b %m, %Y", "allowed here! Enable games by setting the `common/allow_games` config setting to `yes` for", "private=False) nsfw = config.create(\"common\", \"nsfw\", type=boolstr, fallback=False, comment=\"Whether to allow NSFW functions\", scoped=True,", "type=boolstr, fallback=False, comment=\"Whether to allow game functions\", scoped=True, private=False) nsfw = config.create(\"common\", \"nsfw\"," ]
[ "d6_b = Dice.sum( [d6 @ c * 1, d6 @ c * 2,", "12), 6: Fraction(1, 12), }, Fraction(1), ) assert result == expected def test_sum_to_half():", "Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6", "d6_b = Dice.sum([1 * d6, 2 * d6, 3 * d6, 4 *", "= Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4:", "4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12),", "== d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6", "6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3),", "}, Fraction(1), ) assert result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3)", "def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1", "@ Fraction(1 / 2),] ) expected = Dice.from_external( { 1: Fraction(1, 4), 2:", "= Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v, c in d6.items()) d6_b", "Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7,", "3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), },", "Fraction(7, 36), } ) assert result == expected assert result._total_chance == Fraction(1) def", "36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36),", "in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [d6 @ c *", "5, d6 @ c * 6,] ) assert d6_a == d6_b def test_applyfunction():", "d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6 @", "* 1, d6 @ c * 2, d6 @ c * 3, d6", "d6 @ c * 6,] ) assert d6_a == d6_b def test_applyfunction(): d6", "Fraction from dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6) for c in", "= Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4:", "== Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) 
result = d6.apply_functions({(1,): lambda _: d6},", "d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [d6 @ c * 1,", "Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for v, c in d6.items())", "5 * d6 @ c, 6 * d6 @ c,] ) assert d6_a", "Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1,", "* d6 for v, c in d6.items()) d6_b = Dice.sum([1 * d6, 2", "d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v", "for v, c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [1", ") expected = Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5,", "for v, c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [d6", "c * 1, d6 @ c * 2, d6 @ c * 3,", "Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11, 6), ) assert", "test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v, c in", "== expected assert result._total_chance == Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,):", "expected assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,):", "Fraction(11, 6), ) assert result == expected assert result._total_chance == Fraction(11, 6) def", "d6 @ d}, lambda d: d) expected = Dice.from_full( { 1: Fraction(1, 36),", "* d6 @ c, 6 * d6 @ c,] ) assert d6_a ==", "* 3, d6 @ c * 4, d6 @ c * 5, d6", "Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6 @ c * 2 ==", "= Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4:", "d6 @ c * 5, d6 @ c * 6,] ) assert d6_a", "@ c * 2, d6 @ c * 3, d6 @ c *", "d6_a = Dice.sum(v * d6 @ c for v, c in d6.items()) c", ") assert result == expected assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6 =", "@ Fraction(1 / 2),]) expected = Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1,", "assert result == expected assert result._total_chance 
== Fraction(11, 6) def test_applydice(): result =", "Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external( { 1: Fraction(1, 3), 2:", "3, d6 @ c * 4, d6 @ c * 5, d6 @", "Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5,", "Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1,", "== d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a =", "Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3), ) assert result == expected", "@ c,] ) assert d6_a == d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared():", "5: Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11, 6), ) assert result ==", "d: d) expected = Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3), 3:", "@ c, 4 * d6 @ c, 5 * d6 @ c, 6", "v, c in d6.items()) d6_b = Dice.sum([1 * d6, 2 * d6, 3", "c, 5 * d6 @ c, 6 * d6 @ c,] ) assert", "= d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected = Dice.from_external( { 1:", "expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected =", "= Dice.sum( [d6 @ c * 1, d6 @ c * 2, d6", "Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7,", "3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9), },", "c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [1 * d6", "def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for", "Dice.sum(v * d6 @ c for v, c in d6.items()) c = Fraction(1,", "{ 1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6),", "d6}, lambda d: d) expected = Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1,", "d6_a == d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a", "3), ) assert result == expected def test_sum_to_one(): result = 
Dice.sum( [Dice.from_dice(6) @", "/ 2),]) expected = Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3), 3:", "* d6, 5 * d6, 6 * d6]) assert d6_a == d6_b assert", "[d6 @ c * 1, d6 @ c * 2, d6 @ c", "assert result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 /", "Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected = Dice.from_external( {", "Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1,", "= Dice.sum([1 * d6, 2 * d6, 3 * d6, 4 * d6,", "3), 6: Fraction(1, 3), }, Fraction(11, 6), ) assert result == expected assert", ") assert result == expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1", "lambda _: d6}, lambda d: d) expected = Dice.from_external( { 1: Fraction(1, 6),", "Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1,", "18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9),", "result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected = Dice.from_external( {", "2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3), 6:", "c * 3, d6 @ c * 4, d6 @ c * 5,", "result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),]", "== 2 * d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a =", "Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11,", "4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2), )", "@ c, 6 * d6 @ c,] ) assert d6_a == d6_b assert", "d6.items()) d6_b = Dice.sum([1 * d6, 2 * d6, 3 * d6, 4", "result == expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2),", "* 6,] ) assert d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result", "assert result == expected assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6)", "expected = 
Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18),", "= Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert", "@ c, 5 * d6 @ c, 6 * d6 @ c,] )", "@ c * 4, d6 @ c * 5, d6 @ c *", "d) expected = Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1,", "6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected =", "d6 @ c * 2 == 2 * d6 @ c def test_nested_d6():", "assert d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda", "result._total_chance == Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6),", "def test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v, c", "3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6),", "Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected = Dice.from_external(", "c, 6 * d6 @ c,] ) assert d6_a == d6_b assert d6_a._total_chance", "expected = Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36),", "6), ) assert result == expected assert result._total_chance == Fraction(11, 6) def test_applydice():", "expected = Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3),", "18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9),", "* d6 @ c, 4 * d6 @ c, 5 * d6 @", "test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for v,", "* d6, 4 * d6, 5 * d6, 6 * d6]) assert d6_a", "d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v, c in d6.items())", "1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5:", "Fraction(1 / 2),] ) expected = Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1,", "d6 @ c, 2 * d6 @ c, 3 * d6 @ c,", "3), 4: Fraction(1, 3), 5: Fraction(1, 3), 
6: Fraction(1, 3), }, Fraction(11, 6),", "4 * d6 @ c, 5 * d6 @ c, 6 * d6", "result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),])", "def test_totalchance(): d6 = Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2), Fraction(1,", "2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6:", "c * 5, d6 @ c * 6,] ) assert d6_a == d6_b", "Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1,", "c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v,", "d6 @ c, 6 * d6 @ c,] ) assert d6_a == d6_b", "Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3,", "@ d}, lambda d: d) expected = Dice.from_full( { 1: Fraction(1, 36), 2:", "d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [1 * d6 @ c,", "2 * d6 @ c, 3 * d6 @ c, 4 * d6", "d) expected = Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7,", "Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda", "Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected", "d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d:", "d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v", "4, d6 @ c * 5, d6 @ c * 6,] ) assert", "d6_a == d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a", ") assert d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,):", "fractions import Fraction from dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6) for", "2 * d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v", "36), } ) assert result == expected assert result._total_chance == Fraction(1) 
def test_applyfunction_old():", "d6_b = Dice.sum( [1 * d6 @ c, 2 * d6 @ c,", "Dice def test_totalchance(): d6 = Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2),", "2, d6 @ c * 3, d6 @ c * 4, d6 @", "6) d6_b = Dice.sum( [1 * d6 @ c, 2 * d6 @", "* d6 @ c,] ) assert d6_a == d6_b assert d6_a._total_chance == Fraction(1)", "2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected = Dice.from_external( { 1: Fraction(1,", "/ 2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected = Dice.from_external( { 1:", "d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected", "Dice.sum([1 * d6, 2 * d6, 3 * d6, 4 * d6, 5", "in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [1 * d6 @", "Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1,", "@ c * 6,] ) assert d6_a == d6_b def test_applyfunction(): d6 =", "expected = Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3),", "== expected assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) result =", "2 == 2 * d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a", "Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c", "@ c * 3, d6 @ c * 4, d6 @ c *", "Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d) expected", "Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4,", "{ 1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3),", "d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected = Dice.from_external( { 1: Fraction(1,", "18), 5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3), ) assert result", "== expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected", "5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2), ) assert result ==", "def test_applydice(): result = 
Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external(", "}, Fraction(11, 6), ) assert result == expected assert result._total_chance == Fraction(11, 6)", "== Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @", "Fraction(1, 6) d6_b = Dice.sum( [d6 @ c * 1, d6 @ c", "Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), ) assert result", "}, Fraction(4, 3), ) assert result == expected def test_sum_to_one(): result = Dice.sum(", "== d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a =", "{ 1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18),", "Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3), ) assert", "import Fraction from dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6) for c", "test_totalchance(): d6 = Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6),", "test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( {", "d: d6 @ d}, lambda d: d) expected = Dice.from_full( { 1: Fraction(1,", "c in d6.items()) d6_b = Dice.sum([1 * d6, 2 * d6, 3 *", "expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @", ") assert d6_a == d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 =", "2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6:", "Fraction(7, 36), 6: Fraction(7, 36), } ) assert result == expected assert result._total_chance", "d6]) assert d6_a == d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 =", "import Dice def test_totalchance(): d6 = Dice.from_dice(6) for c in [ Fraction(1), Fraction(1,", "5 * d6, 6 * d6]) assert d6_a == d6_b assert d6_a._total_chance ==", "d6, 3 * d6, 4 
* d6, 5 * d6, 6 * d6])", "= Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),] )", "1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5:", "4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), ) assert", "]: assert d6 @ c * 2 == 2 * d6 @ c", "36), 5: Fraction(7, 36), 6: Fraction(7, 36), } ) assert result == expected", "d6_a = Dice.sum(v * d6 for v, c in d6.items()) d6_b = Dice.sum([1", "d6 @ c, 4 * d6 @ c, 5 * d6 @ c,", "* d6 @ c, 5 * d6 @ c, 6 * d6 @", "d6 @ c,] ) assert d6_a == d6_b assert d6_a._total_chance == Fraction(1) def", "assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v *", "3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2),", "@ c * 1, d6 @ c * 2, d6 @ c *", ") assert result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1", "def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external(", "assert result == expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 /", "Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1,", "{(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5, 18), 2:", "d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d:", "9), 6: Fraction(1, 9), }, Fraction(4, 3), ) assert result == expected def", "* d6, 6 * d6]) assert d6_a == d6_b assert d6_a._total_chance == Fraction(6)", "* 2, d6 @ c * 3, d6 @ c * 4, d6", "from fractions import Fraction from dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6)", "Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), ) assert result == expected def", "c, 3 * d6 @ c, 4 * d6 @ c, 5 *", "d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6) 
d6_a = Dice.sum(v * d6", "6,] ) assert d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6) result =", "3), }, Fraction(11, 6), ) assert result == expected assert result._total_chance == Fraction(11,", "d6 = Dice.from_dice(6) for c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]:", "= Fraction(1, 6) d6_b = Dice.sum( [1 * d6 @ c, 2 *", "2),]) expected = Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1,", "4: Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11, 6), )", "3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1, 3),", "Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c", "test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d)", "from dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6) for c in [", "d6 for v, c in d6.items()) d6_b = Dice.sum([1 * d6, 2 *", "2),] ) expected = Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4), 3:", ") expected = Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1,", "Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1,", "3: Fraction(1, 3), 4: Fraction(1, 3), 5: Fraction(1, 3), 6: Fraction(1, 3), },", "= Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external( { 1: Fraction(1,", "[ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6 @ c * 2", "c * 2 == 2 * d6 @ c def test_nested_d6(): d6 =", "4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), )", "assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v *", "d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for v, c", "= Dice.sum(v * d6 for v, c in d6.items()) d6_b = Dice.sum([1 *", "= Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4:", "Fraction(1, 3), }, 
Fraction(11, 6), ) assert result == expected assert result._total_chance ==", "3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6),", "* 2 == 2 * d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6)", "d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6", "Fraction(4, 3), ) assert result == expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6)", "== Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), )", "lambda d: d) expected = Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36),", "dice_stats import Dice def test_totalchance(): d6 = Dice.from_dice(6) for c in [ Fraction(1),", "d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d) expected = Dice.from_full( {", "Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1,", "6), 5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2), ) assert result", "* 5, d6 @ c * 6,] ) assert d6_a == d6_b def", "6), ]: assert d6 @ c * 2 == 2 * d6 @", "d6 @ c * 4, d6 @ c * 5, d6 @ c", "= Fraction(1, 6) d6_b = Dice.sum( [d6 @ c * 1, d6 @", "Dice.from_dice(6) d6_a = Dice.sum(v * d6 for v, c in d6.items()) d6_b =", "c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6 @ c", "def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for", "* 4, d6 @ c * 5, d6 @ c * 6,] )", "Fraction(1 / 2),]) expected = Dice.from_external( { 1: Fraction(1, 3), 2: Fraction(1, 3),", "= Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d)", "2 * d6, 3 * d6, 4 * d6, 5 * d6, 6", "* d6]) assert d6_a == d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6", "= Dice.sum( [1 * d6 @ c, 2 * d6 @ c, 3", "{ 1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12),", "Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2), ) 
assert result == expected", "{ 1: Fraction(1, 36), 2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36),", "Dice.sum(v * d6 for v, c in d6.items()) d6_b = Dice.sum([1 * d6,", "6: Fraction(7, 36), } ) assert result == expected assert result._total_chance == Fraction(1)", "Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7,", "Dice.sum( [1 * d6 @ c, 2 * d6 @ c, 3 *", "2), Fraction(1, 6), ]: assert d6 @ c * 2 == 2 *", "result._total_chance == Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _:", "* d6, 3 * d6, 4 * d6, 5 * d6, 6 *", "3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6), },", "36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36), } ) assert", "Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36), } ) assert result ==", "2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18),", "c * 6,] ) assert d6_a == d6_b def test_applyfunction(): d6 = Dice.from_dice(6)", "c = Fraction(1, 6) d6_b = Dice.sum( [1 * d6 @ c, 2", "c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [d6 @ c", "* d6, 2 * d6, 3 * d6, 4 * d6, 5 *", "lambda d: d6 @ d}, lambda d: d) expected = Dice.from_full( { 1:", "Fraction(1, 6), 5: Fraction(1, 6), 6: Fraction(1, 6), }, Fraction(3, 2), ) assert", "4 * d6, 5 * d6, 6 * d6]) assert d6_a == d6_b", "d: d) expected = Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7, 36), 3:", "[1 * d6 @ c, 2 * d6 @ c, 3 * d6", "Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1),", "assert d6 @ c * 2 == 2 * d6 @ c def", "@ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected = Dice.from_external(", "* d6 @ c, 3 * d6 @ c, 4 * d6 @", "3 * d6 @ c, 4 * d6 @ c, 5 * d6", "test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for v,", "* d6 @ c for v, c in d6.items()) c = Fraction(1, 6)", "5: Fraction(7, 36), 6: 
Fraction(7, 36), } ) assert result == expected assert", "} ) assert result == expected assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6", "d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6", "v, c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [1 *", "12), }, Fraction(1), ) assert result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6),", "Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1,", "Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36), } )", "c,] ) assert d6_a == d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6", "d}, lambda d: d) expected = Dice.from_full( { 1: Fraction(1, 36), 2: Fraction(7,", "6 * d6]) assert d6_a == d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance():", "d6 @ c for v, c in d6.items()) c = Fraction(1, 6) d6_b", "c = Fraction(1, 6) d6_b = Dice.sum( [d6 @ c * 1, d6", "for v, c in d6.items()) d6_b = Dice.sum([1 * d6, 2 * d6,", "4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3), )", "Dice.sum( [d6 @ c * 1, d6 @ c * 2, d6 @", "3 * d6, 4 * d6, 5 * d6, 6 * d6]) assert", "= Dice.from_dice(6) d6_a = Dice.sum(v * d6 @ c for v, c in", "c * 4, d6 @ c * 5, d6 @ c * 6,]", "= Dice.sum(v * d6 @ c for v, c in d6.items()) c =", "6 * d6 @ c,] ) assert d6_a == d6_b assert d6_a._total_chance ==", "assert result._total_chance == Fraction(1) def test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda", "== expected def test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3)", "3), 5: Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11, 6), ) assert result", "= d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d) expected = Dice.from_full(", "test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): 
lambda d: d6 @ d}, lambda", "6: Fraction(1, 9), }, Fraction(4, 3), ) assert result == expected def test_sum_to_one():", "* d6 @ c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v *", "d6, 6 * d6]) assert d6_a == d6_b assert d6_a._total_chance == Fraction(6) def", "Dice.from_dice(3) @ Fraction(1 / 2),] ) expected = Dice.from_external( { 1: Fraction(1, 4),", "6: Fraction(1, 12), }, Fraction(1), ) assert result == expected def test_sum_to_half(): result", "d6, 5 * d6, 6 * d6]) assert d6_a == d6_b assert d6_a._total_chance", "* d6 @ c, 2 * d6 @ c, 3 * d6 @", "= Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4:", "_: d6}, lambda d: d) expected = Dice.from_external( { 1: Fraction(1, 6), 2:", "/ 2),] ) expected = Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4),", "test_sum_to_one(): result = Dice.sum( [Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 /", "18), 4: Fraction(5, 18), 5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3),", "Fraction(1), ) assert result == expected def test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @", "@ c * 5, d6 @ c * 6,] ) assert d6_a ==", "[Dice.from_dice(6) @ Fraction(1 / 2), Dice.from_dice(3) @ Fraction(1 / 2),] ) expected =", "in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6 @ c *", "12), 5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), ) assert result ==", "assert d6_a == d6_b assert d6_a._total_chance == Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6)", "1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5, 18), 5:", "d6 @ c, 3 * d6 @ c, 4 * d6 @ c,", "@ c def test_nested_d6(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 for", "Fraction(1, 6), ]: assert d6 @ c * 2 == 2 * d6", "1: Fraction(1, 6), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 3), 5:", "1, d6 @ c * 2, d6 @ c * 3, d6 @", "result == expected assert result._total_chance == Fraction(1) def 
test_applyfunction_old(): d6 = Dice.from_dice(6) result", "6: Fraction(1, 3), }, Fraction(11, 6), ) assert result == expected assert result._total_chance", "Fraction(1, 3), 2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1,", "d6, 2 * d6, 3 * d6, 4 * d6, 5 * d6,", "c, 4 * d6 @ c, 5 * d6 @ c, 6 *", "= Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda d: d) expected =", "v, c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum( [d6 @", "Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18), 3:", "= Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5,", "c, 2 * d6 @ c, 3 * d6 @ c, 4 *", "d6, 4 * d6, 5 * d6, 6 * d6]) assert d6_a ==", "assert d6_a == d6_b assert d6_a._total_chance == Fraction(6) def test_nested_d6_chance(): d6 = Dice.from_dice(6)", "result == expected assert result._total_chance == Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice(", "Fraction(1, 2), Fraction(1, 6), ]: assert d6 @ c * 2 == 2", "Dice.from_external( { 1: Fraction(5, 18), 2: Fraction(5, 18), 3: Fraction(5, 18), 4: Fraction(5,", ") assert result == expected assert result._total_chance == Fraction(11, 6) def test_applydice(): result", "Fraction(1, 6) d6_b = Dice.sum( [1 * d6 @ c, 2 * d6", "Fraction(1, 3), 6: Fraction(1, 3), }, Fraction(11, 6), ) assert result == expected", "in d6.items()) d6_b = Dice.sum([1 * d6, 2 * d6, 3 * d6,", "for c in [ Fraction(1), Fraction(1, 2), Fraction(1, 6), ]: assert d6 @", "lambda d: d) expected = Dice.from_external( { 1: Fraction(1, 6), 2: Fraction(1, 3),", "Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external( { 1: Fraction(1, 3),", "5: Fraction(1, 12), 6: Fraction(1, 12), }, Fraction(1), ) assert result == expected", "3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36), }", "@ c, 2 * d6 @ c, 3 * d6 @ c, 
4", "== Fraction(1) def test_nested_d6_chance_squared(): d6 = Dice.from_dice(6) d6_a = Dice.sum(v * d6 @", "expected assert result._total_chance == Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4,", "2: Fraction(1, 3), 3: Fraction(1, 3), 4: Fraction(1, 6), 5: Fraction(1, 6), 6:", "5: Fraction(1, 9), 6: Fraction(1, 9), }, Fraction(4, 3), ) assert result ==", "result = d6.apply_functions({(1,): lambda d: d6 @ d}, lambda d: d) expected =", "1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5:", "d6 @ c * 3, d6 @ c * 4, d6 @ c", "36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36),", "36), 6: Fraction(7, 36), } ) assert result == expected assert result._total_chance ==", "@ c, 3 * d6 @ c, 4 * d6 @ c, 5", "@ c * 2 == 2 * d6 @ c def test_nested_d6(): d6", "4), 2: Fraction(1, 4), 3: Fraction(1, 4), 4: Fraction(1, 12), 5: Fraction(1, 12),", "def test_applyfunction_old(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda _: d6}, lambda d:", "expected = Dice.from_external( { 1: Fraction(1, 4), 2: Fraction(1, 4), 3: Fraction(1, 4),", "c * 2, d6 @ c * 3, d6 @ c * 4,", "d6 @ c * 2, d6 @ c * 3, d6 @ c", "result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external( { 1:", "Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected", "9), }, Fraction(4, 3), ) assert result == expected def test_sum_to_one(): result =", "test_sum_to_half(): result = Dice.sum([Dice.from_dice(6), Dice.from_dice(3) @ Fraction(1 / 2),]) expected = Dice.from_external( {", "6) d6_b = Dice.sum( [d6 @ c * 1, d6 @ c *", "assert result._total_chance == Fraction(11, 6) def test_applydice(): result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)},", "d6 @ c, 5 * d6 @ c, 6 * d6 @ c,]", "Fraction(1, 12), }, Fraction(1), ) assert result == expected def 
test_sum_to_half(): result =", "2: Fraction(7, 36), 3: Fraction(7, 36), 4: Fraction(7, 36), 5: Fraction(7, 36), 6:", "def test_applyfunction(): d6 = Dice.from_dice(6) result = d6.apply_functions({(1,): lambda d: d6 @ d},", "result = Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1:", "4: Fraction(7, 36), 5: Fraction(7, 36), 6: Fraction(7, 36), } ) assert result", "@ c for v, c in d6.items()) c = Fraction(1, 6) d6_b =", "Fraction(1, 9), }, Fraction(4, 3), ) assert result == expected def test_sum_to_one(): result", "c for v, c in d6.items()) c = Fraction(1, 6) d6_b = Dice.sum(", "Dice.from_dice(3).apply_dice( {(1,): Dice.from_dice(4, 2)}, Dice.from_dice(6), ) expected = Dice.from_external( { 1: Fraction(5, 18)," ]
[ "create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id): mol", "to fetch smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching", "Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all() #", "# create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id): mol =", "from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id): mol = molecule.get(_id) return", "chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id): mol = molecule.get(_id) return mol[\"molecule_structures\"][\"canonical_smiles\"]", "import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client", "Script to fetch smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey", "(necessary?) # import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context()", "id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) 
# import gevent.monkey #", "<gh_stars>10-100 # Script to fetch smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws #", "# Script to fetch smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For", "# gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client", "# import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from", "monkey patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context", "gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import", "patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context #", "fetch smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?)", "# Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all()", "For monkey patching (necessary?) 
# import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import", "# from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule =", "import new_client molecule = new_client.molecule def get_smiles(_id): mol = molecule.get(_id) return mol[\"molecule_structures\"][\"canonical_smiles\"] print(get_smiles(\"CHEMBL300797\"))", "create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id): mol = molecule.get(_id)", "https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all() # from", "chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) # import gevent.monkey", "smiles using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) #", "# For monkey patching (necessary?) # import gevent.monkey # gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_", "gevent.monkey.patch_all() # from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule", "requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def", "using chembl id # Source: https://www.ebi.ac.uk/chembl/ws # For monkey patching (necessary?) # import", "from requests.packages.urllib3.util.ssl_ import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule", "import create_urllib3_context # create_urllib3_context() from chembl_webresource_client.new_client import new_client molecule = new_client.molecule def get_smiles(_id):" ]
[ "Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the", "writing, software # distributed under the License is distributed on an \"AS IS\"", "tensorflow as tf from tensorflow.python.platform import gfile from sqlflow_submitter import db import os", "as tf from tensorflow.python.platform import gfile from sqlflow_submitter import db import os def", "KIND, either express or implied. # See the License for the specific language", "permissions and # limitations under the License. import io import pickle import tarfile", "Unless required by applicable law or agreed to in writing, software # distributed", "import gfile from sqlflow_submitter import db import os def save(oss_model_dir, *meta): ''' Save", "len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "License. # You may obtain a copy of the License at # #", "A list contains the saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts)", "using pickle. Args: oss_model_dir: OSS URI that the model will be saved to.", "are saved by `model.save` from a MaxCompute table Args: oss_model_dir: OSS URI that", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "a MaxCompute table Args: oss_model_dir: OSS URI that the model will be saved", "pickle. Args: oss_model_dir: OSS URI that the model will be saved to. *meta:", "be saved. 
Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise", "tensorflow.python.platform import gfile from sqlflow_submitter import db import os def save(oss_model_dir, *meta): '''", "and metadata that are saved by `model.save` from a MaxCompute table Args: oss_model_dir:", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. # You may obtain a", "def load(oss_model_dir): ''' Load and restore a directory and metadata that are saved", "table Args: oss_model_dir: OSS URI that the model will be saved to. Return:", "will be saved to. *meta: python objects to be saved. Return: None '''", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir:", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir)", "specific language governing permissions and # limitations under the License. import io import", "model will be saved to. Return: A list contains the saved python objects", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "saved by `model.save` from a MaxCompute table Args: oss_model_dir: OSS URI that the", "# Copyright 2019 The SQLFlow Authors. All rights reserved. # Licensed under the", "ANY KIND, either express or implied. # See the License for the specific", "saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error", "training SQL statements to OSS directory. Data are saved using pickle. 
Args: oss_model_dir:", "from a MaxCompute table Args: oss_model_dir: OSS URI that the model will be", "Args: oss_model_dir: OSS URI that the model will be saved to. Return: A", "import os def save(oss_model_dir, *meta): ''' Save model descriptions like the training SQL", "objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \",", "load(oss_model_dir): ''' Load and restore a directory and metadata that are saved by", "in compliance with the License. # You may obtain a copy of the", "directory and metadata that are saved by `model.save` from a MaxCompute table Args:", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "import io import pickle import tarfile import odps import tensorflow as tf from", "oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close()", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "directory. Data are saved using pickle. Args: oss_model_dir: OSS URI that the model", "use this file except in compliance with the License. # You may obtain", "oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"),", "and restore a directory and metadata that are saved by `model.save` from a", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "2019 The SQLFlow Authors. All rights reserved. # Licensed under the Apache License,", "raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w')", "not use this file except in compliance with the License. 
# You may", "= oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "!= 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer =", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "governing permissions and # limitations under the License. import io import pickle import", "that are saved by `model.save` from a MaxCompute table Args: oss_model_dir: OSS URI", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "under the License. import io import pickle import tarfile import odps import tensorflow", "language governing permissions and # limitations under the License. import io import pickle", "URI that the model will be saved to. Return: A list contains the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore a directory and metadata that", "io import pickle import tarfile import odps import tensorflow as tf from tensorflow.python.platform", "import odps import tensorflow as tf from tensorflow.python.platform import gfile from sqlflow_submitter import", "OF ANY KIND, either express or implied. # See the License for the", "oss_model_dir: OSS URI that the model will be saved to. *meta: python objects", "objects to be saved. 
Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) !=", "Copyright 2019 The SQLFlow Authors. All rights reserved. # Licensed under the Apache", "2.0 (the \"License\"); # you may not use this file except in compliance", "the model will be saved to. *meta: python objects to be saved. Return:", "that the model will be saved to. Return: A list contains the saved", "ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta),", "rights reserved. # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# you may not use this file except in compliance with the License.", "odps import tensorflow as tf from tensorflow.python.platform import gfile from sqlflow_submitter import db", "agreed to in writing, software # distributed under the License is distributed on", "MaxCompute table Args: oss_model_dir: OSS URI that the model will be saved to.", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "def save(oss_model_dir, *meta): ''' Save model descriptions like the training SQL statements to", "oss_model_dir: OSS URI that the model will be saved to. Return: A list", "(the \"License\"); # you may not use this file except in compliance with", "like the training SQL statements to OSS directory. Data are saved using pickle.", "descriptions like the training SQL statements to OSS directory. 
Data are saved using", "2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader = gfile.GFile(oss_path,", "import tarfile import odps import tensorflow as tf from tensorflow.python.platform import gfile from", "restore a directory and metadata that are saved by `model.save` from a MaxCompute", "= gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore", "# # Unless required by applicable law or agreed to in writing, software", "*meta: python objects to be saved. Return: None ''' uri_parts = oss_model_dir.split(\"?\") if", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "SQLFlow Authors. All rights reserved. # Licensed under the Apache License, Version 2.0", "mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore a directory", "Data are saved using pickle. Args: oss_model_dir: OSS URI that the model will", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "saved using pickle. Args: oss_model_dir: OSS URI that the model will be saved", "contains the saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2:", "# limitations under the License. import io import pickle import tarfile import odps", "either express or implied. # See the License for the specific language governing", "saved to. *meta: python objects to be saved. 
Return: None ''' uri_parts =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "tf from tensorflow.python.platform import gfile from sqlflow_submitter import db import os def save(oss_model_dir,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "sqlflow_submitter import db import os def save(oss_model_dir, *meta): ''' Save model descriptions like", "list contains the saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) !=", "writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "''' Load and restore a directory and metadata that are saved by `model.save`", "2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path,", "None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \",", "file except in compliance with the License. # You may obtain a copy", "a directory and metadata that are saved by `model.save` from a MaxCompute table", "import db import os def save(oss_model_dir, *meta): ''' Save model descriptions like the", "SQL statements to OSS directory. Data are saved using pickle. Args: oss_model_dir: OSS", "URI that the model will be saved to. *meta: python objects to be", "the specific language governing permissions and # limitations under the License. 
import io", "*meta): ''' Save model descriptions like the training SQL statements to OSS directory.", "oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def", "reserved. # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore a directory and metadata", "be saved to. *meta: python objects to be saved. Return: None ''' uri_parts", "the training SQL statements to OSS directory. Data are saved using pickle. Args:", "= \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir):", "`model.save` from a MaxCompute table Args: oss_model_dir: OSS URI that the model will", "the saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise", "saved. Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error", "Save model descriptions like the training SQL statements to OSS directory. Data are", "to. *meta: python objects to be saved. Return: None ''' uri_parts = oss_model_dir.split(\"?\")", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "Args: oss_model_dir: OSS URI that the model will be saved to. *meta: python", "OSS URI that the model will be saved to. Return: A list contains", "to OSS directory. Data are saved using pickle. 
Args: oss_model_dir: OSS URI that", "License for the specific language governing permissions and # limitations under the License.", "uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Load and restore a directory and metadata that are saved by `model.save` from", "gfile from sqlflow_submitter import db import os def save(oss_model_dir, *meta): ''' Save model", "the License. # You may obtain a copy of the License at #", "writer.close() def load(oss_model_dir): ''' Load and restore a directory and metadata that are", "model descriptions like the training SQL statements to OSS directory. Data are saved", "to in writing, software # distributed under the License is distributed on an", "will be saved to. Return: A list contains the saved python objects '''", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer", "metadata that are saved by `model.save` from a MaxCompute table Args: oss_model_dir: OSS", "the model will be saved to. Return: A list contains the saved python", "OSS URI that the model will be saved to. *meta: python objects to", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "are saved using pickle. Args: oss_model_dir: OSS URI that the model will be", "implied. # See the License for the specific language governing permissions and #", "oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader = gfile.GFile(oss_path, mode='r') return pickle.load(reader)", "\"License\"); # you may not use this file except in compliance with the", "License. 
import io import pickle import tarfile import odps import tensorflow as tf", "python objects to be saved. Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts)", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "db import os def save(oss_model_dir, *meta): ''' Save model descriptions like the training", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "statements to OSS directory. Data are saved using pickle. Args: oss_model_dir: OSS URI", "required by applicable law or agreed to in writing, software # distributed under", "by `model.save` from a MaxCompute table Args: oss_model_dir: OSS URI that the model", "pickle import tarfile import odps import tensorflow as tf from tensorflow.python.platform import gfile", "The SQLFlow Authors. All rights reserved. # Licensed under the Apache License, Version", "ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader = gfile.GFile(oss_path, mode='r') return", "to be saved. Return: None ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2:", "applicable law or agreed to in writing, software # distributed under the License", "that the model will be saved to. *meta: python objects to be saved.", "import tensorflow as tf from tensorflow.python.platform import gfile from sqlflow_submitter import db import", "OSS directory. Data are saved using pickle. Args: oss_model_dir: OSS URI that the", "os def save(oss_model_dir, *meta): ''' Save model descriptions like the training SQL statements", "''' Save model descriptions like the training SQL statements to OSS directory. 
Data", "if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"])", "from sqlflow_submitter import db import os def save(oss_model_dir, *meta): ''' Save model descriptions", "be saved to. Return: A list contains the saved python objects ''' uri_parts", "python objects ''' uri_parts = oss_model_dir.split(\"?\") if len(uri_parts) != 2: raise ValueError(\"error oss_model_dir:", "or agreed to in writing, software # distributed under the License is distributed", "limitations under the License. import io import pickle import tarfile import odps import", "\"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): '''", "or implied. # See the License for the specific language governing permissions and", "raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader = gfile.GFile(oss_path, mode='r')", "saved to. Return: A list contains the saved python objects ''' uri_parts =", "from tensorflow.python.platform import gfile from sqlflow_submitter import db import os def save(oss_model_dir, *meta):", "tarfile import odps import tensorflow as tf from tensorflow.python.platform import gfile from sqlflow_submitter", "Return: A list contains the saved python objects ''' uri_parts = oss_model_dir.split(\"?\") if", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer)", "!= 2: raise ValueError(\"error oss_model_dir: \", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) reader =", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "and # limitations under the License. import io import pickle import tarfile import", "model will be saved to. *meta: python objects to be saved. Return: None", "with the License. # You may obtain a copy of the License at", "\"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "All rights reserved. # Licensed under the Apache License, Version 2.0 (the \"License\");", "the License. import io import pickle import tarfile import odps import tensorflow as", "gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore a", "to. 
Return: A list contains the saved python objects ''' uri_parts = oss_model_dir.split(\"?\")", "save(oss_model_dir, *meta): ''' Save model descriptions like the training SQL statements to OSS", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "pickle.dump(list(meta), writer) writer.flush() writer.close() def load(oss_model_dir): ''' Load and restore a directory and", "\", oss_model_dir) oss_path = \"/\".join([uri_parts[0].rstrip(\"/\"), \"sqlflow_model_desc\"]) writer = gfile.GFile(oss_path, mode='w') pickle.dump(list(meta), writer) writer.flush()", "import pickle import tarfile import odps import tensorflow as tf from tensorflow.python.platform import" ]
[ "os.path.dirname(os.path.dirname(path)) else: path = '' return path def build_dso(hou_app_dir, library_dir): hcustom = os.path.join(hou_app_dir,", "[]): command += ['-I', os.path.join(library_dir, path)] for path in config.get('dsoLibdir', []): command +=", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "section.add_variable('PYTHONPATH', '&') # Create or update the section for this library. directory =", "'&') section.add_variable('HOUDINI_PATH', '&') section.add_variable('PYTHONPATH', '&') # Create or update the section for this", "this library. directory = os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' + config['libraryName']) if not section:", "path)] for lib in config.get('dsoLibs', []): command += ['-l', lib] command += ['-i',", "dso_dir = os.path.join(library_dir, 'dso') if not os.path.isdir(dso_dir): os.makedirs(dso_dir) files = [] for name", "not section: section = env.add_named_section('DEFAULT', '', before=env.get_first_named_section()) else: section.clear() section.add_comment(' Automatically generated by", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "= False print('Done.') return len(files), ok class InstallError(Exception): pass class NotALibraryError(InstallError): pass class", "default paths do not get messed up. 
section = env.get_named_section('DEFAULT') if not section:", "[] for name in os.listdir(dso_source): ext = os.path.splitext(name)[1].lower() if ext in ('.c', '.cc',", "= subprocess.call(current_command, cwd=dso_dir) if res != 0: print('Error: hcustom failed with exit code',", "section = env.get_named_section('DEFAULT') if not section: section = env.add_named_section('DEFAULT', '', before=env.get_first_named_section()) else: section.clear()", "the Software without restriction, including without limitation the rights # to use, copy,", "envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "for path in config.get('dsoLibdir', []): command += ['-L', os.path.join(library_dir, path)] for lib in", "sees # # the ampersand. # section.add_variable(info['var'], '&') section.add_variable('HOUDINI_PATH', '&') section.add_variable('PYTHONPATH', '&') #", "person obtaining a copy # of this software and associated documentation files (the", "'nt' else 'bin/hcustom') library_dir = os.path.abspath(library_dir) config = load_library_config(library_dir) dso_source = os.path.join(library_dir, config.get('dsoSource',", "path def build_dso(hou_app_dir, library_dir): hcustom = os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else", "files = [] for name in os.listdir(dso_source): ext = os.path.splitext(name)[1].lower() if ext in", "\"{}\" ...'.format(config['libraryName'])) ok = True for filename in files: current_command = command +", "path)] for path in config.get('dsoLibdir', []): command += ['-L', os.path.join(library_dir, path)] for lib", "+= ['-L', os.path.join(library_dir, path)] for lib in config.get('dsoLibs', []): command += ['-l', lib]", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "'.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not files: return 0, True command = [hcustom]", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "info in HOUDINI_PATH_ENVVARS: # # Houdini will use the default value of the", "now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ # Initialize the default section. It's", "in os.listdir(directory): envfile = os.path.join(directory, name, 'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile))", "in config.get('dsoLibs', []): command += ['-l', lib] command += ['-i', dso_dir] print('Building DSOs", "above copyright notice and this permission notice shall be included in # all", "if install_dir: return install_dir if os.name == 'nt': import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT,", "library:') for line in config['environment']: section.add_line(line) def remove_library(env, name): section = env.get_library(name) if", "if not info['dir']: continue # vardir = os.path.join(directory, info['dir']) # if not os.path.isdir(vardir):", "os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion']) if config.get('environment'): section.add_comment('Environment", "in HOUDINI_PATH_ENVVARS: # if not info['dir']: continue # vardir = os.path.join(directory, info['dir']) #", "files.append(os.path.join(dso_source, name)) if not files: return 0, True command = [hcustom] if config.get('dsoDebug'):", "'') else: previous = True if not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically", "not os.path.isdir(dso_dir): os.makedirs(dso_dir) files = [] for name in os.listdir(dso_source): ext = 
os.path.splitext(name)[1].lower()", "SOFTWARE. import datetime import json import os import operator import shlex import subprocess", "overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update:", "is hereby granted, free of charge, to any person obtaining a copy #", "persons to whom the Software is # furnished to do so, subject to", "{}'.format(config_file)) with open(config_file) as fp: return json.load(fp) def install_library(env, directory, overwrite=False): # Open", "= env.get_named_section('DEFAULT') if not section: section = env.add_named_section('DEFAULT', '', before=env.get_first_named_section()) else: section.clear() section.add_comment('", "config.get('houdiniapp') if install_dir: return install_dir if os.name == 'nt': import winreg key =", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "for lib in config.get('dsoLibs', []): command += ['-l', lib] command += ['-i', dso_dir]", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "config['libraryVersion']) if config.get('environment'): section.add_comment('Environment variables specified by the library:') for line in config['environment']:", "(C) 2017 <NAME> # # Permission is hereby granted, free of charge, to", "section = env.get_library(name) if section: env.remove_section(section) return True return False def get_houdini_application_dir(): install_dir", "to permit persons to whom the Software is # furnished to do so,", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "Create or update the section for this library. directory = os.path.normpath(os.path.abspath(directory)) section =", "[filename] print() print(' {} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir) if res !=", "notice and this permission notice shall be included in # all copies or", "of charge, to any person obtaining a copy # of this software and", "DEALINGS IN # THE SOFTWARE. import datetime import json import os import operator", "update the section for this library. directory = os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' +", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import datetime", "operator import shlex import subprocess from . import __version__ from .config import config", "section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' +", "hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents') if not os.path.isdir(directory):", "config.get('houdinienv', 'houdini16.0') if not '/' in hou and not os.sep in hou: hou", "library_dir): hcustom = os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom') library_dir =", "env.get_named_section('library:' + config['libraryName']) if not section: previous = False section = env.add_named_section('library:' +", "__version__ # Initialize the default section. 
It's purpose is to make sure that", "section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion']) if config.get('environment'): section.add_comment('Environment variables specified by the library:') for", "and this permission notice shall be included in # all copies or substantial", "lib in config.get('dsoLibs', []): command += ['-l', lib] command += ['-i', dso_dir] print('Building", "= env.get_named_section('library:' + config['libraryName']) if not section: previous = False section = env.add_named_section('library:'", "section = env.get_named_section('library:' + config['libraryName']) if not section: previous = False section =", "so, subject to the following conditions: # # The above copyright notice and", "shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path)) else: path = '' return path def build_dso(hou_app_dir,", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "# Open the librarie's configuration file. config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')", "filename in files: current_command = command + [filename] print() print(' {} ...'.format(os.path.basename(filename))) print()", "copy # of this software and associated documentation files (the \"Software\"), to deal", "section: previous = False section = env.add_named_section('library:' + config['libraryName'], '') else: previous =", "in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not files: return 0, True", "class InstallError(Exception): pass class NotALibraryError(InstallError): pass class PreviousInstallationFoundError(InstallError): def __init__(self, library_name): self.library_name =", "to the following conditions: # # The above copyright notice and this permission", "included in # all copies or substantial portions of the Software. 
# #", "config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing library configuration file: {}'.format(config_file))", "houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # # Houdini", "config.get('environment'): section.add_comment('Environment variables specified by the library:') for line in config['environment']: section.add_line(line) def", "[]): command += ['-l', lib] command += ['-i', dso_dir] print('Building DSOs for \"{}\"", "fp: return json.load(fp) def install_library(env, directory, overwrite=False): # Open the librarie's configuration file.", "def build_dso(hou_app_dir, library_dir): hcustom = os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom')", "{}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # # Houdini will use the default value", "make sure that # Houdini's default paths do not get messed up. section", "import shlex import subprocess from . import __version__ from .config import config def", "library_dir = os.path.abspath(library_dir) config = load_library_config(library_dir) dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "library. 
directory = os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' + config['libraryName']) if not section: previous", "directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion'])", "return install_dir if os.name == 'nt': import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path", "os.listdir(dso_source): ext = os.path.splitext(name)[1].lower() if ext in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name))", "if name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory): config_file", "in hou and not os.sep in hou: hou = os.path.expanduser('~/Documents/' + hou +", "and associated documentation files (the \"Software\"), to deal # in the Software without", "get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0') if not '/' in hou and", "False def get_houdini_application_dir(): install_dir = config.get('houdiniapp') if install_dir: return install_dir if os.name ==", "OTHER DEALINGS IN # THE SOFTWARE. import datetime import json import os import", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "Initialize the default section. 
It's purpose is to make sure that # Houdini's", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "env.get_library(name) if section: env.remove_section(section) return True return False def get_houdini_application_dir(): install_dir = config.get('houdiniapp')", "'dso_source')) if not os.path.isdir(dso_source): return 0, True dso_dir = os.path.join(library_dir, 'dso') if not", "sublicense, and/or sell # copies of the Software, and to permit persons to", "Software is # furnished to do so, subject to the following conditions: #", "import json import os import operator import shlex import subprocess from . import", "sure that # Houdini's default paths do not get messed up. section =", "config['libraryName'], directory) section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion']) if config.get('environment'): section.add_comment('Environment variables specified by the", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory):", "def get_houdini_application_dir(): install_dir = config.get('houdiniapp') if install_dir: return install_dir if os.name == 'nt':", "= True for filename in files: current_command = command + [filename] print() print('", "= os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' + config['libraryName']) if not section: previous = False", "with exit code', res) ok = False print('Done.') return len(files), ok class InstallError(Exception):", "import subprocess from . 
import __version__ from .config import config def get_houdini_environment_path(hou=None): hou", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "True command = [hcustom] if config.get('dsoDebug'): command += '-g' for path in config.get('dsoInclude',", "== 'nt': import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0] path", "= [hcustom] if config.get('dsoDebug'): command += '-g' for path in config.get('dsoInclude', []): command", "!= 0: print('Error: hcustom failed with exit code', res) ok = False print('Done.')", "'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion']) if config.get('environment'): section.add_comment('Environment variables", "Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # if not info['dir']: continue #", "or config.get('houdinienv', 'houdini16.0') if not '/' in hou and not os.sep in hou:", "= os.path.splitext(name)[1].lower() if ext in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not", "os.makedirs(dso_dir) files = [] for name in os.listdir(dso_source): ext = os.path.splitext(name)[1].lower() if ext", "#for info in HOUDINI_PATH_ENVVARS: # if not info['dir']: continue # vardir = os.path.join(directory,", "from .config import config def get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0') if", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "# copies of the Software, and to permit persons to whom the Software", "update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # if not info['dir']: continue # vardir", "+= '-g' for path in config.get('dsoInclude', []): command += ['-I', os.path.join(library_dir, path)] for", "Houdini's default paths do not get messed up. section = env.get_named_section('DEFAULT') if not", "ext in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not files: return 0,", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' + config['libraryName']) if not section: previous = False section", "in config.get('dsoInclude', []): command += ['-I', os.path.join(library_dir, path)] for path in config.get('dsoLibdir', []):", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ # Initialize the default section. It's purpose", "res != 0: print('Error: hcustom failed with exit code', res) ok = False", "'&') section.add_variable('PYTHONPATH', '&') # Create or update the section for this library. 
directory", "variables specified by the library:') for line in config['environment']: section.add_line(line) def remove_library(env, name):", "build_dso(hou_app_dir, library_dir): hcustom = os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom') library_dir", "this permission notice shall be included in # all copies or substantial portions", "config['libraryName']) if not section: previous = False section = env.add_named_section('library:' + config['libraryName'], '')", "= command + [filename] print() print(' {} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir)", "vardir = os.path.join(directory, info['dir']) # if not os.path.isdir(vardir): continue # section.add_variable(info['var'], '$' +", "if not section: section = env.add_named_section('DEFAULT', '', before=env.get_first_named_section()) else: section.clear() section.add_comment(' Automatically generated", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "if config.get('dsoDebug'): command += '-g' for path in config.get('dsoInclude', []): command += ['-I',", "software and associated documentation files (the \"Software\"), to deal # in the Software", "do not get messed up. 
section = env.get_named_section('DEFAULT') if not section: section =", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "result = [] for name in os.listdir(directory): envfile = os.path.join(directory, name, 'houdini.env') if", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "and not os.sep in hou: hou = os.path.expanduser('~/Documents/' + hou + '/houdini.env') return", "hou = os.path.expanduser('~/Documents/' + hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory =", "specified by the library:') for line in config['environment']: section.add_line(line) def remove_library(env, name): section", "def remove_library(env, name): section = env.get_library(name) if section: env.remove_section(section) return True return False", "load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ # Initialize the default section.", "result def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing library", "and to permit persons to whom the Software is # furnished to do", "hcustom = os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom') library_dir = os.path.abspath(library_dir)", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "print(' {} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir) if res != 0: print('Error:", "os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing library configuration file: {}'.format(config_file)) with open(config_file)", "name in os.listdir(directory): envfile = os.path.join(directory, name, 'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile): result.append((name,", "the following conditions: # # 
The above copyright notice and this permission notice", "= config.get('houdiniapp') if install_dir: return install_dir if os.name == 'nt': import winreg key", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "ok = True for filename in files: current_command = command + [filename] print()", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "path = os.path.dirname(os.path.dirname(path)) else: path = '' return path def build_dso(hou_app_dir, library_dir): hcustom", "ext = os.path.splitext(name)[1].lower() if ext in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if", "('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not files: return 0, True command", "import config def get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0') if not '/'", "InstallError(Exception): pass class NotALibraryError(InstallError): pass class PreviousInstallationFoundError(InstallError): def __init__(self, library_name): self.library_name = library_name", "result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file):", "datetime import json import os import operator import shlex import subprocess from .", "'dso') if not os.path.isdir(dso_dir): os.makedirs(dso_dir) files = [] for name in os.listdir(dso_source): ext", "section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # # Houdini will use", "import winreg key 
= winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path))", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS:", "section = env.add_named_section('library:' + config['libraryName'], '') else: previous = True if not overwrite:", "file. config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ # Initialize", "return len(files), ok class InstallError(Exception): pass class NotALibraryError(InstallError): pass class PreviousInstallationFoundError(InstallError): def __init__(self,", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "if not files: return 0, True command = [hcustom] if config.get('dsoDebug'): command +=", "PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not os.path.isdir(dso_source): return 0, True dso_dir = os.path.join(library_dir, 'dso')", "__version__ from .config import config def get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0')", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "else: section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for", "in # all copies or substantial portions of the Software. 
# # THE", "...'.format(config['libraryName'])) ok = True for filename in files: current_command = command + [filename]", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import", "to do so, subject to the following conditions: # # The above copyright", "before=env.get_first_named_section()) else: section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now))", "config['libraryName'], '') else: previous = True if not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment('", "ok = False print('Done.') return len(files), ok class InstallError(Exception): pass class NotALibraryError(InstallError): pass", "key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path)) else: path", "get messed up. section = env.get_named_section('DEFAULT') if not section: section = env.add_named_section('DEFAULT', '',", "the variable when it sees # # the ampersand. 
def load_library_config(directory):
    """Parse and return ``houdini-library.json`` from *directory*.

    Raises:
      NotALibraryError: if *directory* has no library configuration file.
    """
    filename = os.path.join(directory, 'houdini-library.json')
    if os.path.isfile(filename):
        with open(filename) as fp:
            return json.load(fp)
    raise NotALibraryError('missing library configuration file: {}'.format(filename))
def install_library(env, directory, overwrite=False):
    """Register the library located in *directory* with the environment *env*.

    Writes two sections into the environment file: a ``DEFAULT`` section that
    re-establishes Houdini's built-in search paths, and a ``library:<name>``
    section carrying the library's paths and variables.

    Raises:
      NotALibraryError: if *directory* is not a Houdini library.
      PreviousInstallationFoundError: if the library is already installed and
        *overwrite* is False.
    """
    lib_config = load_library_config(directory)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
    version = __version__

    # The DEFAULT section makes sure Houdini's default paths do not get
    # messed up: the ampersand tells Houdini to substitute the variable's
    # built-in default value.
    default = env.get_named_section('DEFAULT')
    if default:
        default.clear()
    else:
        default = env.add_named_section('DEFAULT', '', before=env.get_first_named_section())
    default.add_comment(' Automatically generated by houdini-manage v{}'.format(version))
    default.add_comment(' Last update: {}'.format(timestamp))
    default.add_variable('HOUDINI_PATH', '&')
    default.add_variable('PYTHONPATH', '&')

    # Create or update the section for this library.
    directory = os.path.normpath(os.path.abspath(directory))
    name = lib_config['libraryName']
    section = env.get_named_section('library:' + name)
    if section:
        if not overwrite:
            raise PreviousInstallationFoundError(name)
        section.clear()
    else:
        section = env.add_named_section('library:' + name, '')
    section.add_comment(' Automatically generated by houdini-manage v{}'.format(version))
    section.add_comment(' Last update: {}'.format(timestamp))
    section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory)
    section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python'))
    section.add_variable('HLIBPATH_' + name, directory)
    section.add_variable('HLIBVERSION_' + name, lib_config['libraryVersion'])
    if lib_config.get('environment'):
        section.add_comment('Environment variables specified by the library:')
        for entry in lib_config['environment']:
            section.add_line(entry)
def remove_library(env, name):
    """Remove the library section *name* from *env*.

    Returns True if a matching section was found and removed, else False.
    """
    section = env.get_library(name)
    if not section:
        return False
    env.remove_section(section)
    return True
def get_houdini_application_dir():
    """Best-effort lookup of the Houdini installation directory.

    An explicitly configured ``houdiniapp`` path wins.  On Windows the
    location is derived from the ``.hip`` file association in the registry;
    on other platforms an empty string is returned.
    """
    configured = config.get('houdiniapp')
    if configured:
        return configured
    if os.name != 'nt':
        return ''
    import winreg
    key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\shell\\open\\command')
    executable = shlex.split(winreg.QueryValue(key, None))[0]
    # The executable lives in <install>/bin, so strip two path components.
    return os.path.dirname(os.path.dirname(executable))
def build_dso(hou_app_dir, library_dir):
    """Compile the library's DSO sources with Houdini's ``hcustom`` tool.

    Args:
      hou_app_dir: Houdini installation directory (contains ``bin/hcustom``).
      library_dir: root directory of the library to build.

    Returns:
      A ``(num_files, ok)`` tuple: the number of source files found and
      whether every one of them compiled successfully.

    Raises:
      NotALibraryError: if *library_dir* is not a Houdini library.
    """
    hcustom = os.path.join(hou_app_dir, 'bin\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom')
    library_dir = os.path.abspath(library_dir)
    config = load_library_config(library_dir)
    dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source'))
    if not os.path.isdir(dso_source):
        return 0, True
    dso_dir = os.path.join(library_dir, 'dso')
    if not os.path.isdir(dso_dir):
        os.makedirs(dso_dir)

    # Collect all C/C++ translation units in the DSO source directory.
    files = []
    for name in os.listdir(dso_source):
        ext = os.path.splitext(name)[1].lower()
        if ext in ('.c', '.cc', '.cxx', '.cpp'):
            files.append(os.path.join(dso_source, name))
    if not files:
        return 0, True

    command = [hcustom]
    if config.get('dsoDebug'):
        # BUG FIX: the original did ``command += '-g'``, which extends the
        # argument list with the individual characters '-' and 'g' (strings
        # are iterable).  Append the flag as a single argument instead.
        command.append('-g')
    for path in config.get('dsoInclude', []):
        command += ['-I', os.path.join(library_dir, path)]
    for path in config.get('dsoLibdir', []):
        command += ['-L', os.path.join(library_dir, path)]
    for lib in config.get('dsoLibs', []):
        command += ['-l', lib]
    command += ['-i', dso_dir]

    print('Building DSOs for "{}" ...'.format(config['libraryName']))
    ok = True
    for filename in files:
        current_command = command + [filename]
        print()
        print(' {} ...'.format(os.path.basename(filename)))
        print()
        res = subprocess.call(current_command, cwd=dso_dir)
        if res != 0:
            print('Error: hcustom failed with exit code', res)
            ok = False
    print('Done.')
    return len(files), ok
'.cxx', '.cpp'): files.append(os.path.join(dso_source,", "config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ # Initialize the", "os.path.abspath(library_dir) config = load_library_config(library_dir) dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not os.path.isdir(dso_source): return", "= env.add_named_section('library:' + config['libraryName'], '') else: previous = True if not overwrite: raise", "0, True dso_dir = os.path.join(library_dir, 'dso') if not os.path.isdir(dso_dir): os.makedirs(dso_dir) files = []", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "os.path.isfile(config_file): raise NotALibraryError('missing library configuration file: {}'.format(config_file)) with open(config_file) as fp: return json.load(fp)", "by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # if", "librarie's configuration file. 
config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__", "the default value of the variable when it sees # # the ampersand.", "get_houdini_application_dir(): install_dir = config.get('houdiniapp') if install_dir: return install_dir if os.name == 'nt': import", "install_dir if os.name == 'nt': import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path =", "info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory)", "section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' + config['libraryName'], config['libraryVersion']) if", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "reverse=True) return result def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise", "'$' + info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' +", "do so, subject to the following conditions: # # The above copyright notice", "return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents') if not os.path.isdir(directory): return [] result", "# section.add_variable(info['var'], '&') section.add_variable('HOUDINI_PATH', '&') section.add_variable('PYTHONPATH', '&') # Create or update the section", "section.add_variable(info['var'], '$' + info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', 
'$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_'", "'houdini16.0') if not '/' in hou and not os.sep in hou: hou =", "Open the librarie's configuration file. config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version", "hcustom failed with exit code', res) ok = False print('Done.') return len(files), ok", "open(config_file) as fp: return json.load(fp) def install_library(env, directory, overwrite=False): # Open the librarie's", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "config.get('dsoLibs', []): command += ['-l', lib] command += ['-i', dso_dir] print('Building DSOs for", "in hou: hou = os.path.expanduser('~/Documents/' + hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories():", "default value of the variable when it sees # # the ampersand. #", "HOUDINI_PATH_ENVVARS: # # Houdini will use the default value of the variable when", "in files: current_command = command + [filename] print() print(' {} ...'.format(os.path.basename(filename))) print() res", "code', res) ok = False print('Done.') return len(files), ok class InstallError(Exception): pass class", "permit persons to whom the Software is # furnished to do so, subject", "os.name == 'nt' else 'bin/hcustom') library_dir = os.path.abspath(library_dir) config = load_library_config(library_dir) dso_source =", "continue # vardir = os.path.join(directory, info['dir']) # if not os.path.isdir(vardir): continue # section.add_variable(info['var'],", "def get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0') if not '/' in hou", "# Houdini's default paths do not get messed up. 
section = env.get_named_section('DEFAULT') if", "in config.get('dsoLibdir', []): command += ['-L', os.path.join(library_dir, path)] for lib in config.get('dsoLibs', []):", "Permission is hereby granted, free of charge, to any person obtaining a copy", "# vardir = os.path.join(directory, info['dir']) # if not os.path.isdir(vardir): continue # section.add_variable(info['var'], '$'", "= os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not os.path.isdir(dso_source): return 0, True dso_dir = os.path.join(library_dir,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "{} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir) if res != 0: print('Error: hcustom", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "hou: hou = os.path.expanduser('~/Documents/' + hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "os.path.expanduser('~/Documents') if not os.path.isdir(directory): return [] result = [] for name in os.listdir(directory):", "if not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment('", "# The above copyright notice and this permission notice shall be included in", "# of this software and associated documentation files (the \"Software\"), to deal #", "def install_library(env, directory, overwrite=False): # Open the librarie's configuration file. 
config = load_library_config(directory)", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "if res != 0: print('Error: hcustom failed with exit code', res) ok =", "+ hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents') if not", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "sell # copies of the Software, and to permit persons to whom the", "not os.path.isdir(vardir): continue # section.add_variable(info['var'], '$' + info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH',", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "0: print('Error: hcustom failed with exit code', res) ok = False print('Done.') return", "for filename in files: current_command = command + [filename] print() print(' {} ...'.format(os.path.basename(filename)))", "# all copies or substantial portions of the Software. # # THE SOFTWARE", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "print('Done.') return len(files), ok class InstallError(Exception): pass class NotALibraryError(InstallError): pass class PreviousInstallationFoundError(InstallError): def", "raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now))", "section.add_line(line) def remove_library(env, name): section = env.get_library(name) if section: env.remove_section(section) return True return", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "# # Permission is hereby granted, free of charge, to any person obtaining", "all copies or substantial portions of the Software. 
# # THE SOFTWARE IS", "os.path.join(library_dir, 'dso') if not os.path.isdir(dso_dir): os.makedirs(dso_dir) files = [] for name in os.listdir(dso_source):", "os.path.splitext(name)[1].lower() if ext in ('.c', '.cc', '.cxx', '.cpp'): files.append(os.path.join(dso_source, name)) if not files:", "by the library:') for line in config['environment']: section.add_line(line) def remove_library(env, name): section =", "def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing library configuration", "default section. It's purpose is to make sure that # Houdini's default paths", "else: previous = True if not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "dso_dir] print('Building DSOs for \"{}\" ...'.format(config['libraryName'])) ok = True for filename in files:", "'&') # Create or update the section for this library. directory = os.path.normpath(os.path.abspath(directory))", "continue # section.add_variable(info['var'], '$' + info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory,", "directory, overwrite=False): # Open the librarie's configuration file. 
config = load_library_config(directory) now =", "configuration file: {}'.format(config_file)) with open(config_file) as fp: return json.load(fp) def install_library(env, directory, overwrite=False):", "library configuration file: {}'.format(config_file)) with open(config_file) as fp: return json.load(fp) def install_library(env, directory,", "hou or config.get('houdinienv', 'houdini16.0') if not '/' in hou and not os.sep in", "command += ['-L', os.path.join(library_dir, path)] for lib in config.get('dsoLibs', []): command += ['-l',", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "section for this library. directory = os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:' + config['libraryName']) if", "'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing library configuration file: {}'.format(config_file)) with open(config_file) as", "files (the \"Software\"), to deal # in the Software without restriction, including without", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "True for filename in files: current_command = command + [filename] print() print(' {}", "+ config['libraryName'], config['libraryVersion']) if config.get('environment'): section.add_comment('Environment variables specified by the library:') for line", "print() print(' {} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir) if res != 0:", "name)) if not files: return 0, True command = [hcustom] if config.get('dsoDebug'): command", "'$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_' + 
config['libraryName'],", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "= env.add_named_section('DEFAULT', '', before=env.get_first_named_section()) else: section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment('", "= os.path.join(directory, info['dir']) # if not os.path.isdir(vardir): continue # section.add_variable(info['var'], '$' + info['var'],", "not files: return 0, True command = [hcustom] if config.get('dsoDebug'): command += '-g'", "path in config.get('dsoInclude', []): command += ['-I', os.path.join(library_dir, path)] for path in config.get('dsoLibdir',", "by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # #", "name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory): config_file =", "following conditions: # # The above copyright notice and this permission notice shall", "of the Software, and to permit persons to whom the Software is #", "= winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path)) else: path =", "= os.path.abspath(library_dir) config = load_library_config(library_dir) dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not os.path.isdir(dso_source):", "variable when it sees # # the ampersand. # section.add_variable(info['var'], '&') section.add_variable('HOUDINI_PATH', '&')", "= env.get_library(name) if section: env.remove_section(section) return True return False def get_houdini_application_dir(): install_dir =", "houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # if not", "IN # THE SOFTWARE. 
import datetime import json import os import operator import", "vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'], directory) section.add_variable('HLIBVERSION_'", "for line in config['environment']: section.add_line(line) def remove_library(env, name): section = env.get_library(name) if section:", "2017 <NAME> # # Permission is hereby granted, free of charge, to any", "The above copyright notice and this permission notice shall be included in #", "return result def load_library_config(directory): config_file = os.path.join(directory, 'houdini-library.json') if not os.path.isfile(config_file): raise NotALibraryError('missing", "= os.path.join(hou_app_dir, 'bin\\\\hcustom.exe' if os.name == 'nt' else 'bin/hcustom') library_dir = os.path.abspath(library_dir) config", "os.path.isdir(dso_source): return 0, True dso_dir = os.path.join(library_dir, 'dso') if not os.path.isdir(dso_dir): os.makedirs(dso_dir) files", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "'', before=env.get_first_named_section()) else: section.clear() section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update:", "and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def load_library_config(directory): config_file = os.path.join(directory,", "= hou or config.get('houdinienv', 'houdini16.0') if not '/' in hou and not os.sep", "+ info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH', directory) section.add_variable('PYTHONPATH', '$PYTHONPATH', os.path.join(directory, 'python')) section.add_variable('HLIBPATH_' + config['libraryName'],", "config def get_houdini_environment_path(hou=None): hou = hou or config.get('houdinienv', 'houdini16.0') if 
not '/' in", "+= ['-l', lib] command += ['-i', dso_dir] print('Building DSOs for \"{}\" ...'.format(config['libraryName'])) ok", "configuration file. config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') version = __version__ #", "os.name == 'nt': import winreg key = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, 'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0]", "section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # if not info['dir']: continue", "overwrite=False): # Open the librarie's configuration file. config = load_library_config(directory) now = datetime.datetime.now().strftime('%Y-%m-%d", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "DSOs for \"{}\" ...'.format(config['libraryName'])) ok = True for filename in files: current_command =", "= True if not overwrite: raise PreviousInstallationFoundError(config['libraryName']) section.clear() section.add_comment(' Automatically generated by houdini-manage", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "OR OTHER DEALINGS IN # THE SOFTWARE. 
import datetime import json import os", "path in config.get('dsoLibdir', []): command += ['-L', os.path.join(library_dir, path)] for lib in config.get('dsoLibs',", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "env.add_named_section('library:' + config['libraryName'], '') else: previous = True if not overwrite: raise PreviousInstallationFoundError(config['libraryName'])", "config = load_library_config(library_dir) dso_source = os.path.join(library_dir, config.get('dsoSource', 'dso_source')) if not os.path.isdir(dso_source): return 0,", "= os.path.expanduser('~/Documents/' + hou + '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents')", "os.path.join(directory, name, 'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result", "command += ['-l', lib] command += ['-i', dso_dir] print('Building DSOs for \"{}\" ...'.format(config['libraryName']))", "files: current_command = command + [filename] print() print(' {} ...'.format(os.path.basename(filename))) print() res =", "+ config['libraryName']) if not section: previous = False section = env.add_named_section('library:' + config['libraryName'],", "name, 'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile): result.append((name, envfile)) result.sort(key=operator.itemgetter(0), reverse=True) return result def", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "raise NotALibraryError('missing library configuration file: {}'.format(config_file)) with open(config_file) as fp: return json.load(fp) def", "not get messed up. 
section = env.get_named_section('DEFAULT') if not section: section = env.add_named_section('DEFAULT',", "['-l', lib] command += ['-i', dso_dir] print('Building DSOs for \"{}\" ...'.format(config['libraryName'])) ok =", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "'Houdini.hip\\\\shell\\\\open\\\\command') path = shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path)) else: path = '' return", "['-L', os.path.join(library_dir, path)] for lib in config.get('dsoLibs', []): command += ['-l', lib] command", "or update the section for this library. directory = os.path.normpath(os.path.abspath(directory)) section = env.get_named_section('library:'", "section.add_comment(' Automatically generated by houdini-manage v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "# the ampersand. # section.add_variable(info['var'], '&') section.add_variable('HOUDINI_PATH', '&') section.add_variable('PYTHONPATH', '&') # Create or", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "of the variable when it sees # # the ampersand. # section.add_variable(info['var'], '&')", "USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import datetime import json import", "+ '/houdini.env') return os.path.normpath(hou) def get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents') if not os.path.isdir(directory): return", "path = shlex.split(winreg.QueryValue(key, None))[0] path = os.path.dirname(os.path.dirname(path)) else: path = '' return path", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "for name in os.listdir(directory): envfile = os.path.join(directory, name, 'houdini.env') if name.startswith('houdini') and os.path.isfile(envfile):", "get_houdini_user_prefs_directories(): directory = os.path.expanduser('~/Documents') if not os.path.isdir(directory): return [] result = [] for", "'/' in hou and not os.sep in hou: hou = os.path.expanduser('~/Documents/' + hou", "notice shall be included in # all copies or substantial portions of the", "# if not os.path.isdir(vardir): continue # section.add_variable(info['var'], '$' + info['var'], vardir) section.add_variable('HOUDINI_PATH', '$HOUDINI_PATH',", "THE SOFTWARE. import datetime import json import os import operator import shlex import", "# Create or update the section for this library. directory = os.path.normpath(os.path.abspath(directory)) section", "+ [filename] print() print(' {} ...'.format(os.path.basename(filename))) print() res = subprocess.call(current_command, cwd=dso_dir) if res", "from . 
import __version__ from .config import config def get_houdini_environment_path(hou=None): hou = hou", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # # Houdini will use the", "v{}'.format(version)) section.add_comment(' Last update: {}'.format(now)) #for info in HOUDINI_PATH_ENVVARS: # # Houdini will", "+= ['-I', os.path.join(library_dir, path)] for path in config.get('dsoLibdir', []): command += ['-L', os.path.join(library_dir,", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "ampersand. # section.add_variable(info['var'], '&') section.add_variable('HOUDINI_PATH', '&') section.add_variable('PYTHONPATH', '&') # Create or update the" ]
[ "= secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback: cb_sec(1) # dump it", "if experiment is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment", "cb = lambda x: cb_pri_steps(x, i, 3) else: cb = None print(' -", "df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda x: cb_pri_steps(x, i,", "input, range) # standard library import functools # third party # project specific", "['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove", "callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step +", "= primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this commented method. it", "from __future__ import print_function, absolute_import, unicode_literals, division import six from six.moves import (zip,", "experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\" if verbose:", "- Saving to CSVs...') dumped_keys = [] for key, value in six.iteritems(data): talk('", "blob data talk(' - Summarizing raw data...') data = {} for i, df_type", "\"\"\" if verbose: talk = print else: talk = lambda *a, **k: None", "index=False) dumped_keys.append(key) # free up memory once this is saved for key in", "primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this commented method. 
it keeps", "data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment)", "+ CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p)", "x: cb_pri_steps(x, i, 3) else: cb = None print(' - Summarizing {df} data...'.format(df=df_type))", "up memory once this is saved for key in dumped_keys: del data[key] #", "= None talk('preparing blob files') if experiment is None: # load experiment experiment", "project specific from waldo.conf import settings from waldo import wio from . import", "save_processed_data(data, experiment) # TODO: remove this commented method. it keeps failing. # data", "(zip, filter, map, reduce, input, range) # standard library import functools # third", "data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4)", "cb_pri((step + p) / num_steps) else: cb_load = cb_pri = cb_sec = cb_pri_steps", "dumped_keys: del data[key] # process the basic blob data talk(' - Summarizing raw", "bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'],", "= 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from", "def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps):", "step, num_steps): cb_pri((step + p) / num_steps) else: cb_load = cb_pri = cb_sec", "if callback: cb = lambda x: cb_pri_steps(x, i, 3) else: cb = None", "# project specific from waldo.conf import settings from waldo import wio from .", "cb = None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = 
primary.create_primary_df(experiment, df_type, callback=cb)", "data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this commented method.", "# data = primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' - Generating secondary", "* p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p,", "= secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback:", "callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved']", "experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if", "primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' - Generating secondary data...') # data['roi']", "= secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved']", "TODO remove ex_id from parameters. 
rely solely on experiment def summarize(ex_id, experiment=None, verbose=False,", "save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment)", "'sizes']): if callback: cb = lambda x: cb_pri_steps(x, i, 3) else: cb =", "callback: cb = lambda x: cb_pri_steps(x, i, 3) else: cb = None print('", "third party # project specific from waldo.conf import settings from waldo import wio", "lambda *a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p):", "def cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps) else: cb_load = cb_pri", "secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback:", "keeps failing. # data = primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' -", "= cb_pri_steps = None talk('preparing blob files') if experiment is None: # load", "value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up", "- Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO:", "files') if experiment is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded", "# load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data,", "failing. 
# data = primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' - Generating", "+ CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps)", "secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback:", "commented method. it keeps failing. # data = primary.summarize(experiment, callback=cb_pri) # generate secondary", "talk = print else: talk = lambda *a, **k: None if callback: def", "talk(' - Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment,", "absolute_import, unicode_literals, division import six from six.moves import (zip, filter, map, reduce, input,", "verbose: talk = print else: talk = lambda *a, **k: None if callback:", "# free up memory once this is saved for key in dumped_keys: del", "from waldo import wio from . import secondary from . import primary __all__", "reduce, input, range) # standard library import functools # third party # project", "dumped_keys.append(key) # free up memory once this is saved for key in dumped_keys:", "- Summarizing raw data...') data = {} for i, df_type in enumerate(['bounds', 'terminals',", "enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda x: cb_pri_steps(x, i, 3) else:", "- Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None)", "data talk(' - Summarizing raw data...') data = {} for i, df_type in", "CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from parameters. 
rely", "CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps) else:", "map, reduce, input, range) # standard library import functools # third party #", "verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\" if verbose: talk = print else:", "solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\"", "wio from . import secondary from . import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC", "cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] =", "ex_id from parameters. rely solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\"", "is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id))", "wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to", "= {} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb =", "* p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC +", "import settings from waldo import wio from . import secondary from . import", "CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from parameters. 
rely solely on experiment", "if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC *", "key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free", "talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory once this", "talk(' - Saving to CSVs...') dumped_keys = [] for key, value in six.iteritems(data):", "data = primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' - Generating secondary data...')", "cb_sec = cb_pri_steps = None talk('preparing blob files') if experiment is None: #", "party # project specific from waldo.conf import settings from waldo import wio from", "df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this commented method. it keeps failing.", "saved for key in dumped_keys: del data[key] # process the basic blob data", "secondary data talk(' - Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi']", "secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback: cb_sec(1)", "from . 
import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90", "callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC", "data talk(' - Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] =", "cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step", "def save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys = [] for key,", "this is saved for key in dumped_keys: del data[key] # process the basic", "= print else: talk = lambda *a, **k: None if callback: def cb_load(p):", "in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda x: cb_pri_steps(x, i, 3)", "= wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving", "# TODO: remove this commented method. it keeps failing. # data = primary.summarize(experiment,", "library import functools # third party # project specific from waldo.conf import settings", "0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from parameters.", "intermediate summary data. \"\"\" if verbose: talk = print else: talk = lambda", "memory once this is saved for key in dumped_keys: del data[key] # process", "TODO: remove this commented method. it keeps failing. # data = primary.summarize(experiment, callback=cb_pri)", "it keeps failing. 
# data = primary.summarize(experiment, callback=cb_pri) # generate secondary data talk('", "experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' -", "{}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory once this is saved", "print_function, absolute_import, unicode_literals, division import six from six.moves import (zip, filter, map, reduce,", "print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) #", "Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if", "from waldo.conf import settings from waldo import wio from . import secondary from", "data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) #", "lambda x: cb_pri_steps(x, i, 3) else: cb = None print(' - Summarizing {df}", "cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data,", "p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC", "load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment):", "talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys", "callback=None): \"\"\" intermediate summary data. 
\"\"\" if verbose: talk = print else: talk", "this commented method. it keeps failing. # data = primary.summarize(experiment, callback=cb_pri) # generate", "+ p) / num_steps) else: cb_load = cb_pri = cb_sec = cb_pri_steps =", "basic blob data talk(' - Summarizing raw data...') data = {} for i,", "sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback: cb_sec(1) #", "filter, map, reduce, input, range) # standard library import functools # third party", "specific from waldo.conf import settings from waldo import wio from . import secondary", "print else: talk = lambda *a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC", "key in dumped_keys: del data[key] # process the basic blob data talk(' -", "data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if", "def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\" if verbose: talk", "raw data...') data = {} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if", ". 
import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC", "save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys = [] for key, value", "callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8)", "for key in dumped_keys: del data[key] # process the basic blob data talk('", "= primary.summarize(experiment, callback=cb_pri) # generate secondary data talk(' - Generating secondary data...') #", "for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda x:", "CSVs...') dumped_keys = [] for key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key,", "data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this commented", "callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p)", "data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback: cb_sec(1) # dump", "import print_function, absolute_import, unicode_literals, division import six from six.moves import (zip, filter, map,", "range) # standard library import functools # third party # project specific from", "talk('preparing blob files') if experiment is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id,", "data. \"\"\" if verbose: talk = print else: talk = lambda *a, **k:", "import secondary from . import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC", "= 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from parameters. rely solely", "= 0.08 # TODO remove ex_id from parameters. 
rely solely on experiment def", "- {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory once this is", "dumped_keys = [] for key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value,", "in dumped_keys: del data[key] # process the basic blob data talk(' - Summarizing", "free up memory once this is saved for key in dumped_keys: del data[key]", "cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps) else: cb_load = cb_pri =", "= cb_sec = cb_pri_steps = None talk('preparing blob files') if experiment is None:", "Summarizing raw data...') data = {} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']):", "callback=cb) save_processed_data(data, experiment) # TODO: remove this commented method. it keeps failing. #", "process the basic blob data talk(' - Summarizing raw data...') data = {}", "import functools # third party # project specific from waldo.conf import settings from", "# data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment)", "CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id", "p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step,", "dataframe=value, index=False) dumped_keys.append(key) # free up memory once this is saved for key", "settings from waldo import wio from . import secondary from . 
import primary", "__future__ import print_function, absolute_import, unicode_literals, division import six from six.moves import (zip, filter,", "i, 3) else: cb = None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] =", "rely solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data.", "**k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC +", "= None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data,", "summary data. \"\"\" if verbose: talk = print else: talk = lambda *a,", "experiment) # TODO: remove this commented method. it keeps failing. # data =", "secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] =", "num_steps): cb_pri((step + p) / num_steps) else: cb_load = cb_pri = cb_sec =", "Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove", "generate secondary data talk(' - Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds'])", "0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO remove ex_id from parameters. rely solely on", "cb_pri_steps(x, i, 3) else: cb = None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type]", "once this is saved for key in dumped_keys: del data[key] # process the", "# third party # project specific from waldo.conf import settings from waldo import", "None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC", "from . import secondary from . 
import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC =", "= [] for key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False)", "del data[key] # process the basic blob data talk(' - Summarizing raw data...')", "[] for key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key)", "CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p) /", "blob files') if experiment is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load)", "remove ex_id from parameters. rely solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None):", "waldo import wio from . import secondary from . import primary __all__ =", "experiment): talk(' - Saving to CSVs...') dumped_keys = [] for key, value in", "def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def", "cb_load = cb_pri = cb_sec = cb_pri_steps = None talk('preparing blob files') if", "if verbose: talk = print else: talk = lambda *a, **k: None if", "= cb_pri = cb_sec = cb_pri_steps = None talk('preparing blob files') if experiment", "# standard library import functools # third party # project specific from waldo.conf", "method. it keeps failing. # data = primary.summarize(experiment, callback=cb_pri) # generate secondary data", "# data['roi'] = secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data,", "to CSVs...') dumped_keys = [] for key, value in six.iteritems(data): talk(' - {}'.format(key))", "data...') data = {} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback:", "0.08 # TODO remove ex_id from parameters. 
rely solely on experiment def summarize(ex_id,", "bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6)", "division import six from six.moves import (zip, filter, map, reduce, input, range) #", "__all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 #", "on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\" if", "cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC", "p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps) else: cb_load =", "functools # third party # project specific from waldo.conf import settings from waldo", "callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC *", "num_steps) else: cb_load = cb_pri = cb_sec = cb_pri_steps = None talk('preparing blob", "{} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda", "talk = lambda *a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p)", "None talk('preparing blob files') if experiment is None: # load experiment experiment =", "else: cb_load = cb_pri = cb_sec = cb_pri_steps = None talk('preparing blob files')", "import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC =", "the basic blob data talk(' - Summarizing raw data...') data = {} for", "None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment)", "data = {} for i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb", ". 
import secondary from . import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02", "# TODO remove ex_id from parameters. rely solely on experiment def summarize(ex_id, experiment=None,", "experiment is None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID:", "import (zip, filter, map, reduce, input, range) # standard library import functools #", "cb_pri_steps = None talk('preparing blob files') if experiment is None: # load experiment", "i, df_type in enumerate(['bounds', 'terminals', 'sizes']): if callback: cb = lambda x: cb_pri_steps(x,", "remove this commented method. it keeps failing. # data = primary.summarize(experiment, callback=cb_pri) #", "in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory", "+ CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p)", "import six from six.moves import (zip, filter, map, reduce, input, range) # standard", "'terminals', 'sizes']): if callback: cb = lambda x: cb_pri_steps(x, i, 3) else: cb", "cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p):", "for key, value in six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) #", "{}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys = [] for", "{df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type, callback=cb) save_processed_data(data, experiment) # TODO: remove this", "experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. 
\"\"\" if verbose: talk = print", "= ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08 # TODO", "else: talk = lambda *a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC *", "* p) def cb_pri_steps(p, step, num_steps): cb_pri((step + p) / num_steps) else: cb_load", "else: cb = None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment, df_type,", "six from six.moves import (zip, filter, map, reduce, input, range) # standard library", "experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory once this is saved for", "waldo.conf import settings from waldo import wio from . import secondary from .", "talk(' - Summarizing raw data...') data = {} for i, df_type in enumerate(['bounds',", "unicode_literals, division import six from six.moves import (zip, filter, map, reduce, input, range)", "= lambda *a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def", "cb_pri = cb_sec = cb_pri_steps = None talk('preparing blob files') if experiment is", "is saved for key in dumped_keys: del data[key] # process the basic blob", "if callback: cb_sec(0.4) save_processed_data(data, experiment) if callback: cb_sec(0.6) # data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes'])", "experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk('", "None: # load experiment experiment = wio.Experiment(experiment_id=ex_id, callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def", "primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC = 0.90 CALLBACK_SECONDARY_FRAC = 0.08", "callback=cb_pri) # generate secondary data talk(' - Generating secondary data...') # data['roi'] =", "if callback: cb_sec(0.6) # 
data['moved'] = secondary.bodylengths_moved(bounds=data['bounds'], sizes=data['sizes']) data['moved'] = secondary.bodylengths_moved(experiment=experiment) if callback:", "ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys = []", "/ num_steps) else: cb_load = cb_pri = cb_sec = cb_pri_steps = None talk('preparing", "summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary data. \"\"\" if verbose: talk =", "standard library import functools # third party # project specific from waldo.conf import", "3) else: cb = None print(' - Summarizing {df} data...'.format(df=df_type)) data[df_type] = primary.create_primary_df(experiment,", "from six.moves import (zip, filter, map, reduce, input, range) # standard library import", "p) / num_steps) else: cb_load = cb_pri = cb_sec = cb_pri_steps = None", "def cb_pri(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC +", "# process the basic blob data talk(' - Summarizing raw data...') data =", "data[key] # process the basic blob data talk(' - Summarizing raw data...') data", "secondary.bodylengths_moved(experiment=experiment) if callback: cb_sec(0.8) save_processed_data(data, experiment) if callback: cb_sec(1) # dump it out", "= secondary.in_roi(experiment=experiment, bounds=data['bounds']) data['roi'] = secondary.in_roi(experiment=experiment, bounds=None) if callback: cb_sec(0.4) save_processed_data(data, experiment) if", "CALLBACK_PRIMARY_FRAC * p) def cb_sec(p): callback(CALLBACK_LOAD_FRAC + CALLBACK_PRIMARY_FRAC + CALLBACK_SECONDARY_FRAC * p) def", "Saving to CSVs...') dumped_keys = [] for key, value in six.iteritems(data): talk(' -", "import wio from . import secondary from . 
import primary __all__ = ['summarize']", "experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to CSVs...') dumped_keys =", "parameters. rely solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate summary", "# generate secondary data talk(' - Generating secondary data...') # data['roi'] = secondary.in_roi(experiment=experiment,", "*a, **k: None if callback: def cb_load(p): callback(CALLBACK_LOAD_FRAC * p) def cb_pri(p): callback(CALLBACK_LOAD_FRAC", "= lambda x: cb_pri_steps(x, i, 3) else: cb = None print(' - Summarizing", "six.iteritems(data): talk(' - {}'.format(key)) experiment.prepdata.dump(data_type=key, dataframe=value, index=False) dumped_keys.append(key) # free up memory once", "from parameters. rely solely on experiment def summarize(ex_id, experiment=None, verbose=False, callback=None): \"\"\" intermediate", "callback=cb_load) talk('Loaded experiment ID: {}'.format(experiment.id)) def save_processed_data(data, experiment): talk(' - Saving to CSVs...')", "secondary from . import primary __all__ = ['summarize'] CALLBACK_LOAD_FRAC = 0.02 CALLBACK_PRIMARY_FRAC =", "\"\"\" intermediate summary data. \"\"\" if verbose: talk = print else: talk =", "six.moves import (zip, filter, map, reduce, input, range) # standard library import functools" ]
[ "#self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel", "= [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3,", "range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4)", "def _send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with", "% (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str = \"%f", "#common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa = udp_feedback_client.ArmAssistData()", "time from ismore import common_state_lists, ismore_bmi_lib import pandas as pd import pickle import", "pd import pickle import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR,", "= { 'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file))", "lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f:", "np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self,", "#common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3,", "in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos 
= aa_data[self.aa_p] #rh_data = self.rh.get()['data']", "ia, sock) def pack_vel(self, vel, n_dof): format_str = \"%f \" * len(n_dof) return", "import numpy as np import socket, struct from ismore import settings, udp_feedback_client import", "udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs", "def send_vel(self, vel): for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs,", "} self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia,", "'r') as f: lines = f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for", "= lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] -", "def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]", "with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] aa_data =", "self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str = \"%f \" *", "vel, n_dof): format_str = \"%f \" * len(n_dof) return format_str % tuple(vel) def", "i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line =", ")) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos,", "= np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos,", "f: 
lines = f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for i in", "in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2]", "class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM),", "= self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v]", "rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state =", "pack_vel(self, vel, n_dof): format_str = \"%f \" * len(n_dof) return format_str % tuple(vel)", "np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T", "as pd import pickle import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs =", "import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks =", "f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')]) #daa =", "daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p],", "addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines =", "for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line", "range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa = 
udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData()", "np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in", "os class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET,", "range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v =", "#common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos", "self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh", "ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str = \"%f \" * len(n_dof)", "- self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos =", "self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self,", "tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d", "rh_data = np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for", "for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s", "np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T", "6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = 
pd.Series(np.zeros((3, )), dtype=self.aa_p)", "as np import socket, struct from ismore import settings, udp_feedback_client import time from", "last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time()", "self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states", "last_line = lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r')", "import time from ismore import common_state_lists, ismore_bmi_lib import pandas as pd import pickle", "pandas as pd import pickle import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs", "#self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time()", "np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14])))", "__init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs", "1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1:", "i in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data =", "np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) 
#self.last_aa_pos", "assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix =", "np import socket, struct from ismore import settings, udp_feedback_client import time from ismore", "= [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p", "range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )),", "= self.targets_matrix[target_name] d = 100 while d > 20: d = self.get_to_target(targ) print", "target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return", "targ = self.targets_matrix[target_name] d = 100 while d > 20: d = self.get_to_target(targ)", "self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock, ndof, plant) in", "self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3),", "= range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v", "plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)),", "pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks,", "i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r'", "= udp_feedback_client.ArmAssistData() 
#self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t", "= f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')]) #daa", "lines = f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')])", "aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack((", "5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i,", "settings, udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib import pandas as pd", "_send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'),", "lines = f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')])", "numpy as np import socket, struct from ismore import settings, udp_feedback_client import time", "= ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v =", "= aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel =", "struct from ismore import settings, udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib", "self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape)", "8) #common_state_lists.rh_vel_states self.aa_v = 
range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start()", "socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p =", "'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self,", "= f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')]) with", "self.targets_matrix[target_name] d = 100 while d > 20: d = self.get_to_target(targ) print d", "target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state,", "#self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = {", "import common_state_lists, ismore_bmi_lib import pandas as pd import pickle import os class Patient(object):", "= lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as", "= self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if", "enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock)", "= self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d = 100 while d > 20:", "pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20,", "in 
last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel =", "current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1.,", "else: targ = self.targets_matrix[target_name] d = 100 while d > 20: d =", "np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def", "aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def", "= time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, } self.assister =", "for i in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data", "#self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, } self.assister", "= [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist',", "['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4,", "settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types", "Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET,", "= np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time() - 
self.last_aa_pos_t)", "return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7,", "= np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2)", "format_str = \"%f \" * len(n_dof) return format_str % tuple(vel) def _send_command(self, command,", "= np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i", "return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix]", "= np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines =", "#rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v],", "self.aa_v = range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos =", ")) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos):", "import pickle import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR]", "send_vel(self, vel): for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)):", "ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof],", "ismore import common_state_lists, ismore_bmi_lib import pandas as pd import 
pickle import os class", ")), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff':", "aa_data = np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines", "target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name]", "self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p]", "vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state", "target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) >", "range(3)]) #aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos", "self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types =", "open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i)", "dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5,", "self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] ))", "(plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str = \"%f \"", "command, addr, sock): sock.sendto(command, addr) def 
_get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r')", "import pandas as pd import pickle import os class Patient(object): def __init__(self, targets_matrix_file):", "np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else:", "from ismore import settings, udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib import", "import socket, struct from ismore import settings, udp_feedback_client import time from ismore import", "as f: lines = f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for i", "lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i]", "def pack_vel(self, vel, n_dof): format_str = \"%f \" * len(n_dof) return format_str %", "socket, struct from ismore import settings, udp_feedback_client import time from ismore import common_state_lists,", "def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ", "ismore import settings, udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib import pandas", "open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i)", "np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel))", "f: lines = f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for i in", "socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3)", "= daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = 
aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack((", "1: targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d = 100 while d", "self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6)", "self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d = 100 while d > 20: d", "= udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start()", "= pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate':", "in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia,", "self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] ))", "= np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state =", "import settings, udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib import pandas as", "20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel):", "aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state", "sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f:", "> 1: targ = self.targets_matrix[target_name][tix] else: targ = 
self.targets_matrix[target_name] d = 100 while", "#self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p]", "#aa_vel = daa/(time.time() - self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos =", "with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] rh_data =", "#self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, }", "i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel", "\"%f \" * len(n_dof) return format_str % tuple(vel) def _send_command(self, command, addr, sock):", "as f: lines = f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for i", "% tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data =", "len(n_dof) return format_str % tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr) def", "= np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None)", "for i in last_line.split(',')]) #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)])", "sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant,", "= ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock, ndof,", "self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def 
go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ =", "go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ =", "self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel,", "sock) def pack_vel(self, vel, n_dof): format_str = \"%f \" * len(n_dof) return format_str", "n_dof): format_str = \"%f \" * len(n_dof) return format_str % tuple(vel) def _send_command(self,", "= range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa", "#daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)]) #aa_vel = daa/(time.time() -", "= self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2]", "tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data']", "mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0): if len(self.targets_matrix[target_name].shape) > 1: targ", "self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states", "if len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d =", "targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d = 100 while d >", "= range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh =", "common_state_lists, ismore_bmi_lib import pandas as pd import pickle import 
os class Patient(object): def", "time.time() #self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs)", "ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock, ndof, plant)", "udp_feedback_client import time from ismore import common_state_lists, ismore_bmi_lib import pandas as pd import", "#aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line =", "{ 'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def", "(ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' %", "socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p", "[range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p =", "= \"%f \" * len(n_dof) return format_str % tuple(vel) def _send_command(self, command, addr,", "np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines()", "last_line = lines[-2] rh_data = np.array([float(i) for i in last_line.split(',')]) #daa = np.array([aa_data[0][i]", "'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8)", "assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return 
np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name, tix=0):", "* len(n_dof) return format_str % tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr)", "get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs = self.assister(current_state,", "_get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line", "last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] rh_data", "len(self.targets_matrix[target_name].shape) > 1: targ = self.targets_matrix[target_name][tix] else: targ = self.targets_matrix[target_name] d = 100", "range(4) #common_state_lists.rh_pos_states self.rh_v = range(4, 8) #common_state_lists.rh_vel_states self.aa_v = range(3, 6) #self.aa =", "#self.rh.start() assister_kwargs = { 'call_rate': 20, 'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix", "pickle import os class Patient(object): def __init__(self, targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks", "\" * len(n_dof) return format_str % tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command,", "rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return np.hstack((pos, vel)) def get_to_target(self,", "self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states self.rh_v", "self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines() last_line = lines[-2] aa_data", 
"self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def", "#self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t = time.time() #self.rh.start() assister_kwargs =", "format_str % tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data", "pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] )) vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] )) return", "))))).T assist_kwargs = self.assister(current_state, target_state, 1., mode=None) self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14]))) return np.sum((np.array(current_state)-np.array(target_state))**2) def go_to_target(self, target_name,", "def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs =", "= pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs,", "= range(3, 6) #self.aa = udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3,", "from ismore import common_state_lists, ismore_bmi_lib import pandas as pd import pickle import os", "addr, sock): sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as", "[settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)]", "- self.last_aa_pos_t) #self.last_aa_pos = aa_data[self.aa_p] #rh_data = self.rh.get()['data'] pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p]", 
"f.read().splitlines() last_line = lines[-2] aa_data = np.array([float(i) for i in last_line.split(',')]) with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'),", "return format_str % tuple(vel) def _send_command(self, command, addr, sock): sock.sendto(command, addr) def _get_current_state(self):", "[socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs = [range(3), range(3, 7)] self.plant_types = ['ArmAssist', 'ReHand']", "udp_feedback_client.ArmAssistData() #self.rh = udp_feedback_client.ReHandData() #self.aa.start() #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p] #self.last_aa_pos_t =", "'r') as f: lines = f.read().splitlines() last_line = lines[-2] rh_data = np.array([float(i) for", "vel)) def get_to_target(self, target_pos): current_state = np.mat(self._get_current_state()).T target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T assist_kwargs", "self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for i, (ia, sock,", "ismore_bmi_lib import pandas as pd import pickle import os class Patient(object): def __init__(self,", "targets_matrix_file): self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR] self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)] self.n_dofs =", "7)] self.plant_types = ['ArmAssist', 'ReHand'] self.aa_p = range(3) #common_state_lists.aa_pos_states self.rh_p = range(4) #common_state_lists.rh_pos_states", "%s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str", "def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines = f.read().splitlines()", "vel): for i, (ia, sock, ndof, plant) 
in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)): self._send_command('SetSpeed", "sock.sendto(command, addr) def _get_current_state(self): #aa_data = self.aa.get()['data'] with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f: lines", "self._send_command('SetSpeed %s %s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof):", "'xy_cutoff': 5, } self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs) self.targets_matrix = pickle.load(open(targets_matrix_file)) def send_vel(self, vel): for", "%s\\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock) def pack_vel(self, vel, n_dof): format_str =" ]
[ "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix", ":param constraints_scale: tensor (all parameters x dependent parameters) Tensor that encodes how complete", "tensor describes this relation for the dispersion model. This form of constraints is", "# Fisher information matrix of submodel which is to be trained. if train_a", "= True, train_scale: bool = True, provide_optimizers: Union[dict, None] = None, termination_type: str", "tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver = tf.train.Saver() self.merged_summary = tf.summary.merge_all()", "1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood /", "as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM,", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian on the", "which are optimized. 
:param constraints_loc: tensor (all parameters x dependent parameters) Tensor that", "None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all observations of", "self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver = tf.train.Saver()", "fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True,", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train =", "size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var,", "with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn,", "= batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood =", "self.hessians_train = hessians_train self.fim = fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational", "train_b, noise_model: str, dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param", "model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs = model.log_probs # custom self.sample_indices", "hessians_full self.hessians_train = hessians_train self.fim = fim_full 
self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\"", "- norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full", ") else: hessians_train = hessians_full else: hessians_train = None fim_full = FIM( batched_data=batched_data,", "train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True,", "Whether to train mean model. If False, the initialisation is kept. :param train_r:", "TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external", "TODO :param fetch_fn: TODO :param batch_size: int Size of mini-batches used. :param model_vars:", "Contains tf.Variables which are optimized. :param constraints_loc: tensor (all parameters x dependent parameters)", "constraints_scale: tensor (all parameters x dependent parameters) Tensor that encodes how complete parameter", "the case are not fitted. :param num_observations: int Number of observations. 
:param num_features:", "graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with self.graph.as_default(): with", "self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood", "= self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim?", "import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import", "op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, )", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of", "dtype=dtype ) else: hessians_train = hessians_full else: hessians_train = None fim_full = FIM(", "Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype", "noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel which is to", "__init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a,", "= batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class 
EstimatorGraphAll(EstimatorGraphGLM):", "model. :param num_design_scale_params: int Number of parameters per feature in scale model. :param", "(VGLMs). :param train_loc: bool Whether to train mean model. If False, the initialisation", "used in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars,", "dtype=dtype ) # Hessian of submodel which is to be trained. if train_a", "axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary:", "a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood =", "from .external import op_utils from .external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM):", "indepedent parameters: all = <constraints, indep>. This tensor describes this relation for the", "model. :param graph: tf.Graph :param batch_size: int Size of mini-batches used. :param init_a:", "BatchedDataModelGraphGLM from .external import op_utils from .external import pkg_constants logger = logging.getLogger(__name__) class", "if not train_a or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors", "be trained. 
if train_a or train_b: if not train_a or not train_b: fim_train", "parameters x dependent parameters) Tensor that encodes how complete parameter set which includes", "= BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) #", "self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices", "\"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param", "negative binomial GLM metrics on full data set. \"\"\" def __init__( self, sample_indices:", "the initialisation is kept. :param train_scale: bool Whether to train dispersion model. If", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None # Define", "or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False,", "of model. Contains tf.Variables which are optimized. 
:param constraints_loc: tensor (all parameters x", "= FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype", "batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model", "update_b=train_b, dtype=dtype ) else: batch_fim = None self.X = batch_model.X self.design_loc = batch_model.design_loc", "= noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data", ":param model_vars: ModelVars Variables of model. Contains tf.Variables which are optimized. :param constraints_loc:", "X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model =", "\"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else:", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train =", ") self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc,", "\"\"\" :param fetch_fn: TODO :param batch_size: int Size of mini-batches used. 
:param model_vars:", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None", "self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2", "op_utils from .external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph", "self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS)", "= batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train =", "not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model,", "num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None,", "= batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs =", "tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars,", "num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model:", "model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, 
noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel which", "these are the IRLS matrix blocks # of the trained subset of parameters).", "Define the hessian on the batched model for newton-rhapson: # (note that these", "batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor", "or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False,", "batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian", "parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with", "in vector generalized linear models (VGLMs). :param train_mu: bool Whether to train mean", "Hessian of submodel which is to be trained. if train_a or train_b: if", "X, design_loc, design_scale, size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc,", "fim_train = fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of full", "train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE,", "update_b=train_b, dtype=dtype ) else: fim_train = fim_full else: fim_train = None with tf.name_scope(\"jacobians\"):", "set. 
\"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars,", "not fitted. :param num_observations: int Number of observations. :param num_features: int Number of", "not train_a or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged ==", "parameters). if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data", "trained subset of parameters). if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index,", "bool = True, provide_optimizers: Union[dict, None] = None, termination_type: str = \"global\", extended_summary=False,", "in mean model. :param num_design_scale_params: int Number of parameters per feature in scale", "switch for fim? 
# Summary statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full),", "tensorflow as tf import numpy as np import xarray as xr from .external", "dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ###", ":param sample_indices: TODO :param fetch_fn: TODO :param batch_size: int Size of mini-batches used.", "self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood", "= - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of", "# (note that these are the IRLS matrix blocks # of the trained", "Size of mini-batches used. :param init_a: nd.array (mean model size x features) Initialisation", "which includes dependent parameters arises from indepedent parameters: all = <constraints, indep>. This", "parameters: all = <constraints, indep>. This tensor describes this relation for the dispersion", "constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data", "None, init_a=None, init_b=None, train_loc: bool = True, train_scale: bool = True, provide_optimizers: Union[dict,", "observations of a feature are zero. 
Features for which this is the case", "= jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim = fim_full self.fim_train =", "jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a,", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full", "class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on batched", "<constraints, indep>. This tensor describes this relation for the mean model. This form", "of constraints is used in vector generalized linear models (VGLMs). :param train_loc: bool", "Computational graph to evaluate negative binomial GLM metrics on full data set. \"\"\"", "import Union import logging import tensorflow as tf import numpy as np import", "(note that these are the Hessian matrix blocks # of the trained subset", "tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\"", "all observations of a feature are zero. Features for which this is the", "model. If False, the initialisation is kept. 
:param provide_optimizers: :param termination_type: :param extended_summary:", "ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params,", "tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx,", "self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train", "tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver", "self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for", "= iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph(", "tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param sample_indices:", "model for newton-rhapson: # (note that these are the Jacobian matrix blocks #", "of mini-batches used. 
:param init_a: nd.array (mean model size x features) Initialisation for", "for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph(", "self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate',", "on the batched model: # (note that these are the IRLS matrix blocks", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full else:", "self.loss = loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train", "optimized. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes how", "batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def", "features. :param num_design_loc_params: int Number of parameters per feature in mean model. 
:param", "batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2", "= map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full", "= model.sigma2 self.probs = model.probs self.log_probs = model.log_probs # custom self.sample_indices = sample_indices", "constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn:", "else: batch_fim = None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale", "used in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians,", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel which is", "train_a, train_b, noise_model: str, dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO", "kept. :param train_r: bool Whether to train dispersion model. If False, the initialisation", "# (note that these are the Jacobian matrix blocks # of the trained", "self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? 
#", "these are the Jacobian matrix blocks # of the trained subset of parameters).", "with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size))", "feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var)", "tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver =", "import op_utils from .external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational", "xr.DataArray, graph: tf.Graph = None, batch_size: int = None, init_a=None, init_b=None, train_loc: bool", "Hessian of full model for reporting. hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "if noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise", "data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood =", "= model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu = model.mu self.r =", "self.probs = model.probs self.log_probs = model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood =", ":param graph: tf.Graph :param batch_size: int Size of mini-batches used. 
:param init_a: nd.array", "not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params,", "are the Jacobian matrix blocks # of the trained subset of parameters). if", "== \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model", "initialisation is kept. :param dtype: Precision used in tensorflow. \"\"\" if noise_model ==", "= batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs =", "in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians,", "= batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood =", "Tensor that encodes how complete parameter set which includes dependent parameters arises from", "update_b=True, dtype=dtype ) # Fisher information matrix of submodel which is to be", "noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None self.X = batch_model.X", "performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data", "= <constraints, indep>. This tensor describes this relation for the mean model. This", "tensorflow. 
\"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM", "dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics:", "tensor (all parameters x dependent parameters) Tensor that encodes how complete parameter set", "batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b,", "batch_size: int Size of mini-batches used. :param init_a: nd.array (mean model size x", "jacobian_train = None self.X = model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data", "is used in vector generalized linear models (VGLMs). :param constraints_scale: tensor (all parameters", "sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train", "to evaluate negative binomial GLM metrics on full data set. \"\"\" def __init__(", "training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn,", "with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass", "constraints is used in vector generalized linear models (VGLMs). :param train_mu: bool Whether", "bool Whether to train dispersion model. 
If False, the initialisation is kept. :param", "be trained. if train_a or train_b: if not train_a or not train_b: jacobian_train", "model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param sample_indices: TODO", "fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str,", "graph: tf.Graph = None, batch_size: int = None, init_a=None, init_b=None, train_loc: bool =", "for all parameters of dispersion model. :param constraints_loc: tensor (all parameters x dependent", "fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"):", "update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix of submodel which is to", "linear models (VGLMs). :param train_mu: bool Whether to train mean model. If False,", "True, provide_optimizers: Union[dict, None] = None, termination_type: str = \"global\", extended_summary=False, noise_model: str", "int = None, init_a=None, init_b=None, train_loc: bool = True, train_scale: bool = True,", "self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train", "custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection,", "= norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train =", "graph to evaluate negative binomial GLM metrics on full data set. 
\"\"\" def", "self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu", "tf.Graph = None, batch_size: int = None, init_a=None, init_b=None, train_loc: bool = True,", "= training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors)", "batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors )", "batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"):", "train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define", "from .external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to", "xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM", "= hessians_train self.fim = fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph", "name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data =", "num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, 
batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements", "constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars =", ") else: fim_train = fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian", ") with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") # ### alternative definitions", "self.mu = model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs", "tensor describes this relation for the mean model. This form of constraints is", ") # Fisher information matrix of submodel which is to be trained. if", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix of submodel", "self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs = model.log_probs # custom self.sample_indices =", "\"\"\" Computational graph to evaluate negative binomial GLM metrics on full data set.", "model not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data", "# Jacobian of submodel which is to be trained. 
if train_a or train_b:", "typing import Union import logging import tensorflow as tf import numpy as np", "import xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import", "iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel which is to be", "map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype)", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else:", "self.batched_data = batched_data self.mu = model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs", "\"\"\" Computational graph to evaluate negative binomial GLM metrics on batched data set.", "from typing import Union import logging import tensorflow as tf import numpy as", "negative binomial GLM metrics on batched data set. \"\"\" def __init__( self, num_observations,", "b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian on the batched model for", "IRLS matrix blocks # of the trained subset of parameters). if train_a or", "termination_type: :param extended_summary: :param dtype: Precision used in tensorflow. \"\"\" if noise_model ==", "parameters of dispersion model. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor", "tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model for reporting. 
hessians_full = Hessians(", "<reponame>SabrinaRichter/batchglm from typing import Union import logging import tensorflow as tf import numpy", "class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on full", "data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data =", "these are the Hessian matrix blocks # of the trained subset of parameters).", "model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1),", ") norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"):", "jacobian on the batched model for newton-rhapson: # (note that these are the", "relation for the dispersion model. This form of constraints is used in vector", "noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None # Define the", "(all parameters x dependent parameters) Tensor that encodes how complete parameter set which", "batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index", "num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int = None,", "of a feature are zero. 
Features for which this is the case are", "/ tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with", "batched_data self.mu = model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs = model.probs", "pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial", "is kept. :param provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision used in", "self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__(", "data) -> BasicModelGraph: X, design_loc, design_scale, size_factors = data model = BasicModelGraph( X=X,", "= data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype,", "dispersion model. 
This form of constraints is used in vector generalized linear models", "** Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars,", "= 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model = BatchedDataModelGraph(", "self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype )", "noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") # ###", "dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian on the batched model for newton-rhapson:", "to be trained. if train_a or train_b: if not train_a or not train_b:", "last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood", "None self.X = model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data", "= batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r =", "are not fitted. :param num_observations: int Number of observations. :param num_features: int Number", "tf.Graph :param batch_size: int Size of mini-batches used. :param init_a: nd.array (mean model", "initialisation is kept. 
:param provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision used", "constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int = None, init_a=None, init_b=None, train_loc:", "nd.array (mean model size x features) Initialisation for all parameters of mean model.", "the dispersion model. This form of constraints is used in vector generalized linear", "model for reporting. hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True)", "design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian on", "== False)[0] # ### performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\"", "jac_b=train_b, dtype=dtype ) else: batch_jac = None # Define the hessian on the", "batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac", "model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss',", "logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM", 
"sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian", "BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model", "ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations,", "int Number of observations. :param num_features: int Number of features. :param num_design_loc_params: int", "relation for the mean model. This form of constraints is used in vector", "Number of observations. :param num_features: int Number of features. :param num_design_loc_params: int Number", "FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices)", "is the case are not fitted. :param num_observations: int Number of observations. :param", "(batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc,", "generalized linear models (VGLMs). :param train_loc: bool Whether to train mean model. If", "tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") # ### alternative definitions for custom", "provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype )", "int Number of features. :param num_design_loc_params: int Number of parameters per feature in", "mini-batches used. 
:param init_a: nd.array (mean model size x features) Initialisation for all", "train_a or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data =", "num_observations: int Number of observations. :param num_features: int Number of features. :param num_design_loc_params:", "= None # Define the IRLS components on the batched model: # (note", "fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ):", "= norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians =", "tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params,", "vector generalized linear models (VGLMs). :param constraints_scale: tensor (all parameters x dependent parameters)", "noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix of submodel which", "constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes how complete parameter", "train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a,", "initialisation is kept. :param train_r: bool Whether to train dispersion model. 
If False,", "batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians", "the IRLS matrix blocks # of the trained subset of parameters). if train_a", "= tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model for reporting. hessians_full =", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full else: fim_train", "batch_size: int = None, init_a=None, init_b=None, train_loc: bool = True, train_scale: bool =", "sample_indices: TODO :param fetch_fn: TODO :param batch_size: int Size of mini-batches used. :param", "mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params,", "iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None # Define the hessian", ") self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var(", "Hessians, FIM else: raise ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self,", "full model for reporting. jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", ":param batch_size: int Size of mini-batches used. 
:param init_a: nd.array (mean model size", "self.X = model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel", "= data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort", "jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train = None self.X", "model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO", ") self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian)", "dependent parameters) Tensor that encodes how complete parameter set which includes dependent parameters", "mean model. 
This form of constraints is used in vector generalized linear models", "train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians", "rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data", ":param fetch_fn: TODO :param feature_isnonzero: Whether all observations of a feature are zero.", "logger.debug(\" ** Build full data model\") # ### alternative definitions for custom observations:", "Jacobian of full model for reporting. jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc,", "trained subset of parameters). 
if train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index,", "\"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not", "as np import xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from", "noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 *", "batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu", "batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) #", "self.mu = batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs", "TODO :param batch_size: int Size of mini-batches used. :param model_vars: ModelVars Variables of", "self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch", "batch_jac = None # Define the hessian on the batched model for newton-rhapson:", "model_vars: ModelVars Variables of model. Contains tf.Variables which are optimized. 
:param constraints_loc: tensor", "dtype=dtype ) else: batch_fim = None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale", "noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "all parameters of dispersion model. :param constraints_loc: tensor (all parameters x dependent parameters)", "Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information", "init_a: nd.array (mean model size x features) Initialisation for all parameters of mean", "Variables of model. Contains tf.Variables which are optimized. :param constraints_loc: tensor (all parameters", "= model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood", "= ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0]", "batched model for newton-rhapson: # (note that these are the Jacobian matrix blocks", "describes this relation for the dispersion model. This form of constraints is used", ":param num_design_loc_params: int Number of parameters per feature in mean model. 
:param num_design_scale_params:", "# Summary statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'):", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians =", "# Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood =", "norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model", "dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv =", "for reporting. hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True,", "tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype", "import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative", "full data set. 
\"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor],", "Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype )", "hessians_full else: hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "of full model for reporting. jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood", "arises from indepedent parameters: all = <constraints, indep>. This tensor describes this relation", "with tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting. jacobian_full = Jacobians( batched_data=batched_data,", "self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians", "batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data =", "else: batch_jac = None # Define the hessian on the batched model for", "tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians,", ":param num_observations: int Number of observations. :param num_features: int Number of features. 
:param", "if train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all observations of a", "feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph", "BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on batched data", "= Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype", "raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices((", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train =", "batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn,", "of features. 
:param num_design_loc_params: int Number of parameters per feature in mean model.", "### performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched", "tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size,", "the jacobian on the batched model for newton-rhapson: # (note that these are", "fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting. jacobian_full", "rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn,", "if not train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc,", "= batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices =", "a feature are zero. Features for which this is the case are not", "model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood", "<constraints, indep>. This tensor describes this relation for the dispersion model. 
This form", "self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss", "norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train = jacobian_train", "or train_b: if not train_a or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices,", "sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index,", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel which", "Hessian matrix blocks # of the trained subset of parameters). if train_a or", "on batched data set. \"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor],", "= batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self,", "import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not recognized\") self.noise_model", "# ### performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build", ") # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood", ":param feature_isnonzero: Whether all observations of a feature are zero. Features for which", "initialisation is kept. :param train_scale: bool Whether to train dispersion model. 
If False,", "metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians =", "is kept. :param train_r: bool Whether to train dispersion model. If False, the", "Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype )", ":param train_r: bool Whether to train dispersion model. If False, the initialisation is", "= noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc,", "data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors)", "batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale,", "self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train", "name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, 
size_factors=batch_size_factors ) # Define the jacobian on the batched", "= training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next()", "FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external import pkg_constants logger = logging.getLogger(__name__)", "== \"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise", "how complete parameter set which includes dependent parameters arises from indepedent parameters: all", "# TODO switch for fim? # Summary statistics on feature-wise model gradients: self.gradients", "feature are zero. Features for which this is the case are not fitted.", "Union import logging import tensorflow as tf import numpy as np import xarray", "self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r", "self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM", ") # Define the jacobian on the batched model for newton-rhapson: # (note", "= batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu:", "the initialisation is kept. :param provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision", "design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with", "all = <constraints, indep>. This tensor describes this relation for the dispersion model.", "submodel which is to be trained. 
if train_a or train_b: if not train_a", ":param num_design_scale_params: int Number of parameters per feature in scale model. :param graph:", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype", "constraints is used in vector generalized linear models (VGLMs). :param train_loc: bool Whether", "metrics on full data set. \"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size:", "of constraints is used in vector generalized linear models (VGLMs). :param constraints_scale: tensor", "FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype )", "train_a or train_b: if not train_a or not train_b: fim_train = FIM( batched_data=batched_data,", "self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim", "reporting. jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True,", "to train dispersion model. If False, the initialisation is kept. :param provide_optimizers: :param", "model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu = model.mu", "hess_b=train_b, dtype=dtype ) else: batch_hessians = None # Define the IRLS components on", "of the trained subset of parameters). 
if train_a or train_b: batch_hessians = Hessians(", "= batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor", "of parameters). if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model", "on the batched model for newton-rhapson: # (note that these are the Hessian", "parameters). if train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations,", "graph: tf.Graph :param batch_size: int Size of mini-batches used. :param init_a: nd.array (mean", "in vector generalized linear models (VGLMs). :param constraints_scale: tensor (all parameters x dependent", "model. Contains tf.Variables which are optimized. :param constraints_loc: tensor (all parameters x dependent", "describes this relation for the mean model. This form of constraints is used", "raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data =", "constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood", "the batched model for newton-rhapson: # (note that these are the Hessian matrix", ":param dtype: Precision used in tensorflow. 
\"\"\" if noise_model == \"nb\": from .external_nb", "IRLS components on the batched model: # (note that these are the IRLS", "None with tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting. jacobian_full = Jacobians(", "num_features: int Number of features. :param num_design_loc_params: int Number of parameters per feature", "of the trained subset of parameters). if train_a or train_b: batch_jac = Jacobians(", "mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train", "# Hessian of submodel which is to be trained. if train_a or train_b:", "batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale,", "generalized linear models (VGLMs). :param constraints_scale: tensor (all parameters x dependent parameters) Tensor", "batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype )", "model.probs self.log_probs = model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood", "model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None #", "batch_fim = None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data", "parameters: all = <constraints, indep>. 
This tensor describes this relation for the mean", "dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): #", ":param init_a: nd.array (mean model size x features) Initialisation for all parameters of", "nd.array (dispersion model size x features) Initialisation for all parameters of dispersion model.", "= \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO", "with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") # ### alternative definitions for", "dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param batch_size: int Size", ":param constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes how complete", "newton-rhapson: # (note that these are the Hessian matrix blocks # of the", "model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data,", "is kept. :param train_scale: bool Whether to train dispersion model. If False, the", "else: hessians_train = hessians_full else: hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices,", "GLM metrics on batched data set. \"\"\" def __init__( self, num_observations, fetch_fn, batch_size:", "int Number of parameters per feature in scale model. 
:param graph: tf.Graph :param", "set which includes dependent parameters arises from indepedent parameters: all = <constraints, indep>.", "logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on", "noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel which is to", "= FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype", "fim? # Summary statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with", "train_b: if not train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None,", "which is to be trained. if train_a or train_b: if not train_a or", "import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external import pkg_constants logger", "on full data set. \"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int,", "init_b: nd.array (dispersion model size x features) Initialisation for all parameters of dispersion", "log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood)", "metrics on batched data set. \"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int,", "int Size of mini-batches used. :param model_vars: ModelVars Variables of model. 
Contains tf.Variables", "= Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b,", "This form of constraints is used in vector generalized linear models (VGLMs). :param", "* batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data", "for newton-rhapson: # (note that these are the Hessian matrix blocks # of", "constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance related settings buffer_size", "num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) #", "are optimized. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes", "self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale,", "batch_hessians = None # Define the IRLS components on the batched model: #", "train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss =", "Fisher information matrix of submodel which is to be trained. if train_a or", "= batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2 =", "linear models (VGLMs). 
:param constraints_scale: tensor (all parameters x dependent parameters) Tensor that", "train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False,", "\"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all observations of a feature are", "\"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param batch_size: int Size of mini-batches", "noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise", "False, the initialisation is kept. :param train_r: bool Whether to train dispersion model.", "hessians_train self.fim = fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to", ") else: batch_hessians = None # Define the IRLS components on the batched", "hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else: hessians_train = None fim_full =", "or train_b: if not train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices,", "This tensor describes this relation for the mean model. 
This form of constraints", "if not train_a or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "dtype=dtype ) else: batch_jac = None # Define the hessian on the batched", "batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a,", "dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss", "False, the initialisation is kept. :param dtype: Precision used in tensorflow. \"\"\" if", "related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\")", "binomial GLM metrics on full data set. \"\"\" def __init__( self, sample_indices: tf.Tensor,", "train_mu: bool Whether to train mean model. If False, the initialisation is kept.", "used. :param model_vars: ModelVars Variables of model. Contains tf.Variables which are optimized. :param", "trained. if train_a or train_b: if not train_a or not train_b: fim_train =", "log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood,", "sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else:", "model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu = model.mu self.r = model.r", "fetch_fn: TODO :param batch_size: int Size of mini-batches used. 
:param model_vars: ModelVars Variables", "log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac", "= training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator", "= tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate)", "else: jacobian_train = None self.X = model.X self.design_loc = model.design_loc self.design_scale = model.design_scale", "= hessians_full else: hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "False)[0] # ### performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" **", ".external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external", "parameters of mean model. :param init_b: nd.array (dispersion model size x features) Initialisation", "all = <constraints, indep>. This tensor describes this relation for the mean model.", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"):", "def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a,", "# of the trained subset of parameters). 
if train_a or train_b: batch_jac =", "with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged =", "= self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO", ":param train_scale: bool Whether to train dispersion model. If False, the initialisation is", "model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs = model.log_probs", "batched data set. \"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size:", "BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype )", "train dispersion model. If False, the initialisation is kept. 
:param provide_optimizers: :param termination_type:", "encodes how complete parameter set which includes dependent parameters arises from indepedent parameters:", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix of", "the batched model: # (note that these are the IRLS matrix blocks #", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars", "dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices,", "batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else:", "model. If False, the initialisation is kept. :param train_scale: bool Whether to train", ") # initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype,", "tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting. 
jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices,", "numpy as np import xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM", "tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model for reporting.", "mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel which is", "# Define the IRLS components on the batched model: # (note that these", "= None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data =", "batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else:", "batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu = batch_model.mu self.r = batch_model.r", "batch_size: int Size of mini-batches used. :param model_vars: ModelVars Variables of model. Contains", "self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance related settings buffer_size = 4", "constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype )", "self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian", "If False, the initialisation is kept. :param dtype: Precision used in tensorflow. 
\"\"\"", "to evaluate negative binomial GLM metrics on batched data set. \"\"\" def __init__(", "= <constraints, indep>. This tensor describes this relation for the dispersion model. This", "trained subset of parameters). if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index,", ":param init_b: nd.array (dispersion model size x features) Initialisation for all parameters of", "the trained subset of parameters). if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data,", "self.hessians = hessians_full self.hessians_train = hessians_train self.fim = fim_full self.fim_train = fim_train class", "with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model for", "subset of parameters). if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model,", "models (VGLMs). :param train_mu: bool Whether to train mean model. If False, the", "ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size)", "parameter set which includes dependent parameters arises from indepedent parameters: all = <constraints,", "num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph =", "dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") # ### alternative", "for reporting. 
jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model,", "(dispersion model size x features) Initialisation for all parameters of dispersion model. :param", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel", "BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not recognized\") self.noise_model =", "tf.Variables which are optimized. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor", "loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian of full model for reporting. hessians_full", "or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE,", "If False, the initialisation is kept. 
:param train_scale: bool Whether to train dispersion", "sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train", "noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full else: fim_train =", "batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood", "X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the", "vector generalized linear models (VGLMs). :param train_mu: bool Whether to train mean model.", "data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood", "Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param", "training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) #", "training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X,", "= None, init_a=None, init_b=None, train_loc: bool = True, train_scale: bool = True, provide_optimizers:", "extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\" :param 
fetch_fn: TODO :param feature_isnonzero:", "mean model. :param num_design_scale_params: int Number of parameters per feature in scale model.", "to train mean model. If False, the initialisation is kept. :param train_scale: bool", "training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator =", "model. This form of constraints is used in vector generalized linear models (VGLMs).", "def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale,", "if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "(mean model size x features) Initialisation for all parameters of mean model. :param", "num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with", "subset of parameters). 
if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc,", "map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx,", "train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a,", "ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] #", "jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True,", "batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood", "= BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model", "= batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss =", ":param train_loc: bool Whether to train mean model. 
If False, the initialisation is", "= sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss =", "= None # Define the hessian on the batched model for newton-rhapson: #", "): \"\"\" :param fetch_fn: TODO :param batch_size: int Size of mini-batches used. :param", "model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else:", "Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc,", "map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = -", "num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc,", "in vector generalized linear models (VGLMs). :param train_loc: bool Whether to train mean", "bool Whether to train mean model. If False, the initialisation is kept. :param", "model. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes how", "training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data", "= op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? # Summary statistics on feature-wise model", "# Hessian of full model for reporting. 
hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc,", "not train_a or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "model for reporting. jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE,", "If False, the initialisation is kept. :param provide_optimizers: :param termination_type: :param extended_summary: :param", "feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv", "model size x features) Initialisation for all parameters of dispersion model. :param constraints_loc:", "None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True,", "= batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train =", "trained. if train_a or train_b: if not train_a or not train_b: hessians_train =", "model. If False, the initialisation is kept. :param train_r: bool Whether to train", "str = None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all", "matrix of submodel which is to be trained. 
if train_a or train_b: if", "training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) =", "in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians,", "self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac", "extended_summary: :param dtype: Precision used in tensorflow. \"\"\" if noise_model == \"nb\": from", "= None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all observations", "): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param batch_size: int Size of", "a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian on the batched model", "Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype", "** Build full data model\") # ### alternative definitions for custom observations: sample_selection", "# of the trained subset of parameters). if train_a or train_b: batch_fim =", "= model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs =", "(VGLMs). :param train_mu: bool Whether to train mean model. 
If False, the initialisation", "model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale,", "noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output", ".external import op_utils from .external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\"", "Initialisation for all parameters of mean model. :param init_b: nd.array (dispersion model size", "self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model,", "constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO :param batch_size:", "train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss", "Jacobian of submodel which is to be trained. if train_a or train_b: if", "batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\"", "tf.name_scope(\"hessians\"): # Hessian of full model for reporting. 
hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices,", "train_loc: bool = True, train_scale: bool = True, provide_optimizers: Union[dict, None] = None,", "kept. :param provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision used in tensorflow.", "Define the IRLS components on the batched model: # (note that these are", "init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance", "the initialisation is kept. :param train_r: bool Whether to train dispersion model. If", "= tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def", "num_design_scale_params: int Number of parameters per feature in scale model. :param graph: tf.Graph", "mean model. 
:param init_b: nd.array (dispersion model size x features) Initialisation for all", "recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph,", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of", "design_loc, design_scale, size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full else: fim_train = None", "of full model for reporting. hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype ) else:", "dtype=dtype ) # Jacobian of submodel which is to be trained. if train_a", ") # Jacobian of submodel which is to be trained. 
if train_a or", "train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "Number of parameters per feature in scale model. :param graph: tf.Graph :param batch_size:", "of observations. :param num_features: int Number of features. :param num_design_loc_params: int Number of", "= batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train =", "gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss)", "graph to evaluate negative binomial GLM metrics on batched data set. \"\"\" def", "evaluate negative binomial GLM metrics on batched data set. \"\"\" def __init__( self,", "matrix blocks # of the trained subset of parameters). 
if train_a or train_b:", "jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train = None self.X =", "batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype,", "train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a,", "of parameters per feature in mean model. :param num_design_scale_params: int Number of parameters", "per feature in mean model. :param num_design_scale_params: int Number of parameters per feature", "provide_optimizers: Union[dict, None] = None, termination_type: str = \"global\", extended_summary=False, noise_model: str =", "scale model. :param graph: tf.Graph :param batch_size: int Size of mini-batches used. :param", "= BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype", "hessian on the batched model for newton-rhapson: # (note that these are the", "parameters per feature in mean model. 
:param num_design_scale_params: int Number of parameters per", "full data model\") # ### alternative definitions for custom observations: sample_selection = tf.placeholder_with_default(", ") else: batch_fim = None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale =", "models (VGLMs). :param constraints_scale: tensor (all parameters x dependent parameters) Tensor that encodes", "full model for reporting. hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "of parameters per feature in scale model. :param graph: tf.Graph :param batch_size: int", "observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn,", "self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged", "= model.probs self.log_probs = model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood", "Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ):", "else: batch_hessians = None # Define the IRLS components on the batched model:", "batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype ) #", "import numpy as np import xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM,", "train_scale: bool Whether to train dispersion model. 
If False, the initialisation is kept.", "train_r: bool Whether to train dispersion model. If False, the initialisation is kept.", "sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else:", "train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) #", "newton-rhapson: # (note that these are the Jacobian matrix blocks # of the", "= FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype", "batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X,", "BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define", "loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train", "return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) -", "dtype=dtype ) else: batch_hessians = None # Define the IRLS components on the", "iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with 
tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X,", "None # Define the IRLS components on the batched model: # (note that", "= batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var,", "FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices", "iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else: hessians_train = None", "4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations,", "__init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b,", "batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else:", "or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "alternative definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model", "\"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale,", "= op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, 
data: map_model(idx, data).log_likelihood, parallel_iterations=1,", "hess_b=True, dtype=dtype ) # Hessian of submodel which is to be trained. if", "norm_neg_log_likelihood self.loss = loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full", "b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce(", "= jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim =", "hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None # Define the IRLS components", "= logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics", "None] = None, termination_type: str = \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\"", "case are not fitted. :param num_observations: int Number of observations. :param num_features: int", "hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b,", "self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? # Summary statistics on", "generalized linear models (VGLMs). :param train_mu: bool Whether to train mean model. 
If", "= batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data self.mu =", "= loss self.jac = jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train =", "that these are the Hessian matrix blocks # of the trained subset of", "str, dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param batch_size: int", "form of constraints is used in vector generalized linear models (VGLMs). :param constraints_scale:", "indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data", "of mean model. :param init_b: nd.array (dispersion model size x features) Initialisation for", "the IRLS components on the batched model: # (note that these are the", "train_a or train_b: if not train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data,", "def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors = data model =", "with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale )", "init_a=None, init_b=None, train_loc: bool = True, train_scale: bool = True, provide_optimizers: Union[dict, None]", ":param num_features: int Number of features. :param num_design_loc_params: int Number of parameters per", "True, train_scale: bool = True, provide_optimizers: Union[dict, None] = None, termination_type: str =", "Number of parameters per feature in mean model. :param num_design_scale_params: int Number of", "vector generalized linear models (VGLMs). 
:param train_loc: bool Whether to train mean model.", "Jacobians, Hessians, FIM else: raise ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__(", "self.fim = fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate", "model\") # ### alternative definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,),", "not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True,", "constraints is used in vector generalized linear models (VGLMs). :param constraints_scale: tensor (all", ":param batch_size: int Size of mini-batches used. :param model_vars: ModelVars Variables of model.", "self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b,", "noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO :param batch_size: int Size of", "fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph:", "def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray,", "are zero. Features for which this is the case are not fitted. :param", "are the Hessian matrix blocks # of the trained subset of parameters). 
if", "norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss", "provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision used in tensorflow. \"\"\" if", "are the IRLS matrix blocks # of the trained subset of parameters). if", "fitted. :param num_observations: int Number of observations. :param num_features: int Number of features.", "# initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a,", "used in vector generalized linear models (VGLMs). :param train_mu: bool Whether to train", "termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype", "self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver = tf.train.Saver() self.merged_summary =", "blocks # of the trained subset of parameters). 
if train_a or train_b: batch_jac", "data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc,", "training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data =", "drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data =", "= batched_data self.mu = model.mu self.r = model.r self.sigma2 = model.sigma2 self.probs =", "= log_likelihood / tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss =", "mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None # Define", "If False, the initialisation is kept. :param train_r: bool Whether to train dispersion", "sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim", "to train dispersion model. If False, the initialisation is kept. 
:param dtype: Precision", "None, batch_size: int = None, init_a=None, init_b=None, train_loc: bool = True, train_scale: bool", "iterator=True, jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel which is to be", "fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b,", "from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not", "self.batched_data = batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs", "= batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc,", "NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from", "tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood =", "components on the batched model: # (note that these are the IRLS matrix", "models (VGLMs). :param train_loc: bool Whether to train mean model. If False, the", "= Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype", "TODO :param feature_isnonzero: Whether all observations of a feature are zero. 
Features for", "tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if extended_summary: pass self.saver = tf.train.Saver() self.merged_summary", "trained. if train_a or train_b: if not train_a or not train_b: jacobian_train =", "subset of parameters). if train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc,", "training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"):", "self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc, train_scale=train_scale, dtype=dtype ) # Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero,", "= FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model,", "batched model for newton-rhapson: # (note that these are the Hessian matrix blocks", "train_b: if not train_a or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc,", "noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else: hessians_train =", "model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build", "hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, 
constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model,", "Union[dict, None] = None, termination_type: str = \"global\", extended_summary=False, noise_model: str = None,", "noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None # Define the", "init_b=None, train_loc: bool = True, train_scale: bool = True, provide_optimizers: Union[dict, None] =", "Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset", "constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO :param", "self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss", "num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph elements with self.graph.as_default():", ":param train_mu: bool Whether to train mean model. If False, the initialisation is", "on the batched model for newton-rhapson: # (note that these are the Jacobian", "self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train", "feature_isnonzero: Whether all observations of a feature are zero. 
Features for which this", "iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None # Define the IRLS", "num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial graph", "that these are the IRLS matrix blocks # of the trained subset of", "all parameters of mean model. :param init_b: nd.array (dispersion model size x features)", "size x features) Initialisation for all parameters of dispersion model. :param constraints_loc: tensor", "batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs", "dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train = None self.X = model.X", "x features) Initialisation for all parameters of dispersion model. :param constraints_loc: tensor (all", "batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc,", "# of the trained subset of parameters). if train_a or train_b: batch_hessians =", "dispersion model. If False, the initialisation is kept. :param dtype: Precision used in", "of mini-batches used. :param model_vars: ModelVars Variables of model. 
Contains tf.Variables which are", "fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on", "else: raise ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features,", "np import xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external", "- 1), data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood", "\"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params,", "\"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model", "str = \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\" :param fetch_fn:", "Whether to train dispersion model. If False, the initialisation is kept. :param dtype:", "Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model dataset =", "is used in vector generalized linear models (VGLMs). :param train_loc: bool Whether to", "is to be trained. if train_a or train_b: if not train_a or not", "used in vector generalized linear models (VGLMs). :param train_loc: bool Whether to train", "hessians_train = hessians_full else: hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc,", "num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors", "features) Initialisation for all parameters of mean model. 
:param init_b: nd.array (dispersion model", "for newton-rhapson: # (note that these are the Jacobian matrix blocks # of", "the initialisation is kept. :param dtype: Precision used in tensorflow. \"\"\" if noise_model", "batch_data self.mu = batch_model.mu self.r = batch_model.r self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None", "self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\"", "FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype", "the trained subset of parameters). 
if train_a or train_b: batch_hessians = Hessians( batched_data=batch_data,", "= None self.X = model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data =", "Build full data model\") # ### alternative definitions for custom observations: sample_selection =", "= training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with", "= hessians_full self.hessians_train = hessians_train self.fim = fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM):", "on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var',", "None, termination_type: str = \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\"", "Precision used in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph,", "kept. :param train_scale: bool Whether to train dispersion model. If False, the initialisation", "batch_data with tf.name_scope(\"batch\"): batch_model = BasicModelGraph( X=batch_X, design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var,", "batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "data set. \"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int,", "int Number of parameters per feature in mean model. 
:param num_design_scale_params: int Number", ") else: batch_jac = None # Define the hessian on the batched model", "int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn:", "self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size,", "* buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers,", "batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors = data model", "set. \"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc,", "= tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size *", "is used in vector generalized linear models (VGLMs). :param train_mu: bool Whether to", "the hessian on the batched model for newton-rhapson: # (note that these are", "to train mean model. If False, the initialisation is kept. :param train_r: bool", "import logging import tensorflow as tf import numpy as np import xarray as", "= model.design_scale self.batched_data = batched_data self.mu = model.mu self.r = model.r self.sigma2 =", "Jacobian matrix blocks # of the trained subset of parameters). 
if train_a or", "logger.debug(\" ** Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size,", "from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external import", "initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b,", "= Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True,", "the mean model. This form of constraints is used in vector generalized linear", "hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else: hessians_train = None fim_full", "custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood", "the Hessian matrix blocks # of the trained subset of parameters). if train_a", "observations. :param num_features: int Number of features. :param num_design_loc_params: int Number of parameters", "Define output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood", "tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc:", "parameters). 
if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph:", "= fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative", "xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int = None, init_a=None, init_b=None,", "blocks # of the trained subset of parameters). if train_a or train_b: batch_fim", "design_scale, size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var,", "= noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2", "# custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood =", "model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params,", "= True, provide_optimizers: Union[dict, None] = None, termination_type: str = \"global\", extended_summary=False, noise_model:", "update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None self.X = batch_model.X self.design_loc =", "if noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars, 
Jacobians, Hessians, FIM else:", "= None, termination_type: str = \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ):", "parameters) Tensor that encodes how complete parameter set which includes dependent parameters arises", "this is the case are not fitted. :param num_observations: int Number of observations.", "# ### alternative definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\"", "of the trained subset of parameters). if train_a or train_b: batch_fim = FIM(", "information matrix of submodel which is to be trained. if train_a or train_b:", "not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") ))", "this relation for the dispersion model. This form of constraints is used in", "model: # (note that these are the IRLS matrix blocks # of the", "used. :param init_a: nd.array (mean model size x features) Initialisation for all parameters", "batched_data = batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors =", "None # Define the hessian on the batched model for newton-rhapson: # (note", "num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int", "): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether all observations of a feature", "train mean model. If False, the initialisation is kept. 
:param train_scale: bool Whether", "num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with", "shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc,", "fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init(", "num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int =", "as tf import numpy as np import xarray as xr from .external import", "num_design_loc_params: int Number of parameters per feature in mean model. :param num_design_scale_params: int", "GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils", "= log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss self.jac =", "str, dtype ): \"\"\" :param fetch_fn: TODO :param batch_size: int Size of mini-batches", "that these are the Jacobian matrix blocks # of the trained subset of", "\"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features,", "False, the initialisation is kept. 
:param provide_optimizers: :param termination_type: :param extended_summary: :param dtype:", "sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype ) #", "batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood", "buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param", "\"\"\" def __init__( self, num_observations, fetch_fn, batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc,", "\"\"\" if noise_model == \"nb\": from .external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM", "Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype", "data=batched_data, map_fn=lambda idx, data: map_model(idx, data).log_likelihood, parallel_iterations=1, ) norm_log_likelihood = log_likelihood / tf.cast(tf.size(sample_indices),", "for the mean model. This form of constraints is used in vector generalized", "reporting. 
hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True,", "self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim = fim_full self.fim_train", "constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance related settings", "= batched_data.prefetch(1) def map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors = data", "for fim? # Summary statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1)", "train_scale: bool = True, provide_optimizers: Union[dict, None] = None, termination_type: str = \"global\",", "jacobian_train = jacobian_full else: jacobian_train = None self.X = model.X self.design_loc = model.design_loc", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None self.X", "batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\"", "training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size)", "or train_b: if not train_a or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices,", "BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model", 
"from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\")", "train_a or train_b: if not train_a or not train_b: hessians_train = Hessians( batched_data=batched_data,", "train_a or not train_b: hessians_train = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True,", "= fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of full model", "size_factors=batch_size_factors ) # Define the jacobian on the batched model for newton-rhapson: #", "self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class", "dispersion model. If False, the initialisation is kept. :param provide_optimizers: :param termination_type: :param", ":param provide_optimizers: :param termination_type: :param extended_summary: :param dtype: Precision used in tensorflow. \"\"\"", "buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type, provide_optimizers=provide_optimizers, train_loc=train_loc,", "in scale model. 
:param graph: tf.Graph :param batch_size: int Size of mini-batches used.", "that encodes how complete parameter set which includes dependent parameters arises from indepedent", "= jacobian_full else: jacobian_train = None self.X = model.X self.design_loc = model.design_loc self.design_scale", "# (note that these are the Hessian matrix blocks # of the trained", "False, the initialisation is kept. :param train_scale: bool Whether to train dispersion model.", "(VGLMs). :param constraints_scale: tensor (all parameters x dependent parameters) Tensor that encodes how", "tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data: map_model(idx,", "jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None # Define the hessian on", ":param termination_type: :param extended_summary: :param dtype: Precision used in tensorflow. \"\"\" if noise_model", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full else: hessians_train", "iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None self.X = batch_model.X self.design_loc", "dtype=dtype ) # initial graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars(", "xarray as xr from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM,", "GLM metrics on full data set. 
\"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn,", "else: fim_train = fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of", ") self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance related settings buffer_size =", "dtype=dtype ) else: fim_train = fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): #", "jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim = fim_full self.fim_train = fim_train", "the batched model for newton-rhapson: # (note that these are the Jacobian matrix", "class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero,", ".external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model", "feature in scale model. :param graph: tf.Graph :param batch_size: int Size of mini-batches", "linear models (VGLMs). :param train_loc: bool Whether to train mean model. If False,", "batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\"", "form of constraints is used in vector generalized linear models (VGLMs). 
:param train_loc:", "import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model =", "sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher", "design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices))", "Number of features. :param num_design_loc_params: int Number of parameters per feature in mean", "### alternative definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" )", "parameters per feature in scale model. :param graph: tf.Graph :param batch_size: int Size", "= model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs = model.log_probs # custom", "x features) Initialisation for all parameters of mean model. :param init_b: nd.array (dispersion", "blocks # of the trained subset of parameters). if train_a or train_b: batch_hessians", "Computational graph to evaluate negative binomial GLM metrics on batched data set. \"\"\"", "of constraints is used in vector generalized linear models (VGLMs). 
:param train_mu: bool", ".external_nb import BasicModelGraph, ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not recognized\")", "model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size, buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale,", ".external import pkg_constants logger = logging.getLogger(__name__) class FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate", "sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype )", "tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data", "dtype=dtype ) # Fisher information matrix of submodel which is to be trained.", "batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac", "data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices", "self.log_probs = model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood =", "dispersion model. 
:param constraints_loc: tensor (all parameters x dependent parameters) Tensor that encodes", "else: jacobian_train = jacobian_full else: jacobian_train = None self.X = model.X self.design_loc =", "noise_model == \"nb\": from .external_nb import BasicModelGraph, Jacobians, Hessians, FIM else: raise ValueError(\"noise", "not train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "batch_model.norm_neg_log_likelihood self.loss = batch_model.loss self.jac_train = batch_jac self.hessians_train = batch_hessians self.fim_train = batch_fim", "Hessians, FIM else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"):", "fetch_fn: TODO :param feature_isnonzero: Whether all observations of a feature are zero. Features", "for which this is the case are not fitted. :param num_observations: int Number", "= dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data) ->", "= None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True,", "model. If False, the initialisation is kept. :param dtype: Precision used in tensorflow.", "ModelVars, Jacobians, Hessians, FIM else: raise ValueError(\"noise model not recognized\") self.noise_model = noise_model", "self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? # Summary", "# Jacobian of full model for reporting. 
jacobian_full = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None,", "for the dispersion model. This form of constraints is used in vector generalized", "data model\") # ### alternative definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations),", "which this is the case are not fitted. :param num_observations: int Number of", "num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype ) # initial", "batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1) def map_model(idx, data)", "sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str,", "of submodel which is to be trained. if train_a or train_b: if not", "batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=True, jac_b=True, dtype=dtype )", ") else: jacobian_train = jacobian_full else: jacobian_train = None self.X = model.X self.design_loc", "be trained. if train_a or train_b: if not train_a or not train_b: hessians_train", "hess_a=True, hess_b=True, dtype=dtype ) # Hessian of submodel which is to be trained.", "per feature in scale model. 
:param graph: tf.Graph :param batch_size: int Size of", "design_loc=batch_design_loc, design_scale=batch_design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=batch_size_factors ) # Define the jacobian", "init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged == False)[0] # ### performance related", "not rewcognized\") self.noise_model = noise_model dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data =", "with tf.name_scope(\"hessians\"): # Hessian of full model for reporting. hessians_full = Hessians( batched_data=batched_data,", "Define the jacobian on the batched model for newton-rhapson: # (note that these", "Whether to train mean model. If False, the initialisation is kept. :param train_scale:", "if train_a or train_b: if not train_a or not train_b: hessians_train = Hessians(", "parameters arises from indepedent parameters: all = <constraints, indep>. This tensor describes this", ") # Hessian of submodel which is to be trained. if train_a or", "update_a=train_a, update_b=train_b, dtype=dtype ) else: fim_train = fim_full else: fim_train = None with", "# Define the jacobian on the batched model for newton-rhapson: # (note that", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac =", "ModelVars Variables of model. Contains tf.Variables which are optimized. :param constraints_loc: tensor (all", "x dependent parameters) Tensor that encodes how complete parameter set which includes dependent", "batched model: # (note that these are the IRLS matrix blocks # of", "model size x features) Initialisation for all parameters of mean model. 
:param init_b:", "Summary statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var',", "self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians = self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) #", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: batch_jac = None", "binomial GLM metrics on batched data set. \"\"\" def __init__( self, num_observations, fetch_fn,", "the Jacobian matrix blocks # of the trained subset of parameters). if train_a", "self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model:", "tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model = BatchedDataModelGraph( num_observations=self.num_observations, fetch_fn=fetch_fn, batch_size=batch_size,", "This tensor describes this relation for the dispersion model. This form of constraints", "sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model = FullDataModelGraph( sample_indices=sample_selection, fetch_fn=fetch_fn, batch_size=batch_size", "= None with tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting. 
jacobian_full =", "bool = True, train_scale: bool = True, provide_optimizers: Union[dict, None] = None, termination_type:", "train_a or not train_b: jacobian_train = Jacobians( batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\")", "tf import numpy as np import xarray as xr from .external import GradientGraphGLM,", "from indepedent parameters: all = <constraints, indep>. This tensor describes this relation for", "constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype ): \"\"\" :param sample_indices: TODO :param", "for all parameters of mean model. :param init_b: nd.array (dispersion model size x", "tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var) tf.summary.histogram('b_var', self.model_vars.b_var) tf.summary.scalar('loss', self.batched_data_model.loss) tf.summary.scalar('learning_rate', self.learning_rate) if", "features) Initialisation for all parameters of dispersion model. 
:param constraints_loc: tensor (all parameters", "batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train", "train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a,", "iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train = None", "buffer_size=buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" **", "statistics on feature-wise model gradients: self.gradients = tf.reduce_sum(tf.transpose(self.gradients_full), axis=1) with tf.name_scope('summaries'): tf.summary.histogram('a_var', self.model_vars.a_var)", "iterator=True, update_a=True, update_b=True, dtype=dtype ) # Fisher information matrix of submodel which is", "Whether to train dispersion model. If False, the initialisation is kept. 
:param provide_optimizers:", "output metrics: self._set_out_var( feature_isnonzero=feature_isnonzero, dtype=dtype ) self.loss = self.full_data_model.loss self.log_likelihood = self.full_data_model.log_likelihood self.hessians", "noise_model: str, dtype ): \"\"\" :param sample_indices: TODO :param fetch_fn: TODO :param batch_size:", "size_factors=size_factors) return model model = map_model(*fetch_fn(sample_indices)) with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices)", ")) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort)", "import tensorflow as tf import numpy as np import xarray as xr from", "(note that these are the Jacobian matrix blocks # of the trained subset", "dtype: Precision used in tensorflow. \"\"\" if noise_model == \"nb\": from .external_nb import", "noise_model: str = None, dtype=\"float32\" ): \"\"\" :param fetch_fn: TODO :param feature_isnonzero: Whether", "= Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True, dtype=dtype", "= model.X self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu =", "training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale,", "iterator = training_data.make_one_shot_iterator() batch_sample_index, batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data", "# Define the hessian on the batched model for 
newton-rhapson: # (note that", "batch_hessians = Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b,", "mean model. If False, the initialisation is kept. :param train_scale: bool Whether to", "logging import tensorflow as tf import numpy as np import xarray as xr", "with tf.name_scope(\"log_likelihood\"): log_likelihood = op_utils.map_reduce( last_elem=tf.gather(sample_indices, tf.size(sample_indices) - 1), data=batched_data, map_fn=lambda idx, data:", "tf.cast(tf.size(sample_indices), dtype=log_likelihood.dtype) norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"):", "sample_indices self.log_likelihood = log_likelihood self.norm_log_likelihood = norm_log_likelihood self.norm_neg_log_likelihood = norm_neg_log_likelihood self.loss = loss", "model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale, constraints_loc=constraints_loc, constraints_scale=constraints_scale, a_var=model_vars.a_var, b_var=model_vars.b_var, dtype=dtype, size_factors=size_factors) return", "int Size of mini-batches used. :param init_a: nd.array (mean model size x features)", "complete parameter set which includes dependent parameters arises from indepedent parameters: all =", "Features for which this is the case are not fitted. 
:param num_observations: int", "train_a, train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO :param batch_size: int", "batch_size=batch_size * buffer_size, model_vars=self.model_vars, constraints_loc=constraints_loc, constraints_scale=constraints_scale, train_a=train_loc, train_b=train_scale, noise_model=noise_model, dtype=dtype ) self._run_trainer_init( termination_type=termination_type,", "self.hessians_train = batch_hessians self.fim_train = batch_fim class EstimatorGraphAll(EstimatorGraphGLM): \"\"\" \"\"\" mu: tf.Tensor sigma2:", "mean model. If False, the initialisation is kept. :param train_r: bool Whether to", "model. :param init_b: nd.array (dispersion model size x features) Initialisation for all parameters", "form of constraints is used in vector generalized linear models (VGLMs). :param train_mu:", "includes dependent parameters arises from indepedent parameters: all = <constraints, indep>. This tensor", "raise ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params,", "constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size: int = None, init_a=None,", "train_b=train_scale, noise_model=noise_model, dtype=dtype ) with tf.name_scope(\"full_data\"): logger.debug(\" ** Build full data model\") #", "if train_a or train_b: if not train_a or not train_b: fim_train = FIM(", "train dispersion model. If False, the initialisation is kept. :param dtype: Precision used", "train_b, noise_model: str, dtype ): \"\"\" :param fetch_fn: TODO :param batch_size: int Size", "train_loc: bool Whether to train mean model. 
If False, the initialisation is kept.", "jac_a=True, jac_b=True, dtype=dtype ) # Jacobian of submodel which is to be trained.", "jacobian_full else: jacobian_train = None self.X = model.X self.design_loc = model.design_loc self.design_scale =", "from .external import GradientGraphGLM, NewtonGraphGLM, TrainerGraphGLM from .external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from", "jac_b=True, dtype=dtype ) # Jacobian of submodel which is to be trained. if", "model for newton-rhapson: # (note that these are the Hessian matrix blocks #", "model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: batch_hessians = None #", "of dispersion model. :param constraints_loc: tensor (all parameters x dependent parameters) Tensor that", "fim_full else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of full model for", "norm_neg_log_likelihood = - norm_log_likelihood with tf.name_scope(\"loss\"): loss = tf.reduce_sum(norm_neg_log_likelihood) with tf.name_scope(\"hessians\"): # Hessian", "-> BasicModelGraph: X, design_loc, design_scale, size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc,", "the trained subset of parameters). if train_a or train_b: batch_fim = FIM( batched_data=batch_data,", "indep>. This tensor describes this relation for the dispersion model. This form of", "is kept. :param dtype: Precision used in tensorflow. \"\"\" if noise_model == \"nb\":", "hessians_full = Hessians( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=True, hess_b=True,", "kept. :param dtype: Precision used in tensorflow. 
\"\"\" if noise_model == \"nb\": from", "else: hessians_train = None fim_full = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE,", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full", "model.sigma2 self.probs = model.probs self.log_probs = model.log_probs # custom self.sample_indices = sample_indices self.log_likelihood", "fim_full self.fim_train = fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial", "= Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=False, jac_a=train_a, jac_b=train_b,", "or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model,", "(note that these are the IRLS matrix blocks # of the trained subset", "size x features) Initialisation for all parameters of mean model. :param init_b: nd.array", "tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale ) self.idx_nonconverged = np.where(self.model_vars.converged", "batch_size: Union[int, tf.Tensor], buffer_size: int, model_vars, constraints_loc, constraints_scale, train_a, train_b, noise_model: str, dtype", "Initialisation for all parameters of dispersion model. 
:param constraints_loc: tensor (all parameters x", "self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray,", "of parameters). if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc,", "evaluate negative binomial GLM metrics on full data set. \"\"\" def __init__( self,", "self.design_scale = model.design_scale self.batched_data = batched_data self.mu = model.mu self.r = model.r self.sigma2", "BasicModelGraph: X, design_loc, design_scale, size_factors = data model = BasicModelGraph( X=X, design_loc=design_loc, design_scale=design_scale,", "EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external import pkg_constants logger =", "map_model(idx, data) -> BasicModelGraph: X, design_loc, design_scale, size_factors = data model = BasicModelGraph(", "Hessians( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, hess_a=train_a, hess_b=train_b, dtype=dtype )", "dtype ): \"\"\" :param fetch_fn: TODO :param batch_size: int Size of mini-batches used.", "noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype ) else: jacobian_train = jacobian_full else: jacobian_train =", "train mean model. If False, the initialisation is kept. :param train_r: bool Whether", "data set. \"\"\" def __init__( self, sample_indices: tf.Tensor, fetch_fn, batch_size: Union[int, tf.Tensor], model_vars,", "self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? 
# Summary statistics on feature-wise", "self.r = model.r self.sigma2 = model.sigma2 self.probs = model.probs self.log_probs = model.log_probs #", "constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim =", "batched_data=batched_data, sample_indices=sample_indices, batch_model=None, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.JACOBIAN_MODE, noise_model=noise_model, iterator=True, jac_a=train_a, jac_b=train_b, dtype=dtype )", "batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood = batch_model.norm_log_likelihood self.norm_neg_log_likelihood = batch_model.norm_neg_log_likelihood self.loss = batch_model.loss", ":param extended_summary: :param dtype: Precision used in tensorflow. \"\"\" if noise_model == \"nb\":", "np.where(self.model_vars.converged == False)[0] # ### performance related settings buffer_size = 4 with tf.name_scope(\"batched_data\"):", "EstimatorGraphGLM.__init__( self=self, num_observations=num_observations, num_features=num_features, num_design_loc_params=num_design_loc_params, num_design_scale_params=num_design_scale_params, num_loc_params=num_loc_params, num_scale_params=num_scale_params, graph=graph, batch_size=batch_size, constraints_loc=constraints_loc, constraints_scale=constraints_scale, dtype=dtype", "feature in mean model. :param num_design_scale_params: int Number of parameters per feature in", "= tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size,", "indep>. This tensor describes this relation for the mean model. 
This form of", "model.design_scale self.batched_data = batched_data self.mu = model.mu self.r = model.r self.sigma2 = model.sigma2", "else: raise ValueError(\"noise model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices =", "FullDataModelGraph(FullDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics on full data", "this relation for the mean model. This form of constraints is used in", "graph elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc,", "tf.range(num_observations, name=\"sample_index\") )) training_data = data_indices.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=2 * batch_size)) training_data = training_data.batch(batch_size, drop_remainder=True) training_data", "buffer_size = 4 with tf.name_scope(\"batched_data\"): logger.debug(\" ** Build batched data model\") self.batched_data_model =", "self.sigma2 = batch_model.sigma2 self.probs = batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood", "model not rewcognized\") self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\")", ".external import EstimatorGraphGLM, FullDataModelGraphGLM, BatchedDataModelGraphGLM from .external import op_utils from .external import pkg_constants", "mini-batches used. :param model_vars: ModelVars Variables of model. Contains tf.Variables which are optimized.", "dependent parameters arises from indepedent parameters: all = <constraints, indep>. 
This tensor describes", "= None, batch_size: int = None, init_a=None, init_b=None, train_loc: bool = True, train_scale:", "if train_a or train_b: batch_jac = Jacobians( batched_data=batch_data, sample_indices=batch_sample_index, batch_model=batch_model, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars,", "None self.X = batch_model.X self.design_loc = batch_model.design_loc self.design_scale = batch_model.design_scale self.batched_data = batch_data", "sigma2: tf.Tensor def __init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params,", "FIM else: raise ValueError(\"noise model not recognized\") self.noise_model = noise_model EstimatorGraphGLM.__init__( self=self, num_observations=num_observations,", "of parameters). if train_a or train_b: batch_fim = FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale,", "if train_a or train_b: if not train_a or not train_b: jacobian_train = Jacobians(", "= batch_model.probs self.log_probs = batch_model.log_probs self.sample_indices = batch_sample_index self.log_likelihood = batch_model.log_likelihood self.norm_log_likelihood =", "TODO switch for fim? # Summary statistics on feature-wise model gradients: self.gradients =", "# sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) training_data = training_data.prefetch(buffer_size) iterator = training_data.make_one_shot_iterator()", "zero. Features for which this is the case are not fitted. 
:param num_observations:", "FIM( batched_data=batch_data, sample_indices=batch_sample_index, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype )", "elements with self.graph.as_default(): with tf.name_scope(\"model_vars\"): self.model_vars = ModelVars( dtype=dtype, init_a=init_a, init_b=init_b, constraints_loc=constraints_loc, constraints_scale=constraints_scale", "self.design_loc = model.design_loc self.design_scale = model.design_scale self.batched_data = batched_data self.mu = model.mu self.r", "train_b: if not train_a or not train_b: fim_train = FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc,", "dataset = tf.data.Dataset.from_tensor_slices(sample_indices) batched_data = dataset.batch(batch_size) batched_data = batched_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS) batched_data = batched_data.prefetch(1)", "else: fim_train = None with tf.name_scope(\"jacobians\"): # Jacobian of full model for reporting.", "num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale: xr.DataArray, graph: tf.Graph = None, batch_size:", "definitions for custom observations: sample_selection = tf.placeholder_with_default( tf.range(num_observations), shape=(None,), name=\"sample_selection\" ) self.full_data_model =", "used in vector generalized linear models (VGLMs). :param constraints_scale: tensor (all parameters x", ":param fetch_fn: TODO :param batch_size: int Size of mini-batches used. 
:param model_vars: ModelVars", "constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, hess_a=train_a, hess_b=train_b, dtype=dtype ) else: hessians_train = hessians_full", "jacobian_full.jac self.jac_train = jacobian_train self.hessians = hessians_full self.hessians_train = hessians_train self.fim = fim_full", "= training_data.batch(batch_size, drop_remainder=True) training_data = training_data.map(tf.contrib.framework.sort) # sort indices training_data = training_data.map(fetch_fn, num_parallel_calls=pkg_constants.TF_NUM_THREADS)", "= self.full_data_model.hessians.hessian self.fisher_inv = op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? # Summary statistics", "batch_data = iterator.get_next() (batch_X, batch_design_loc, batch_design_scale, batch_size_factors) = batch_data with tf.name_scope(\"batch\"): batch_model =", "Size of mini-batches used. :param model_vars: ModelVars Variables of model. Contains tf.Variables which", "self.noise_model = noise_model with tf.name_scope(\"input_pipeline\"): data_indices = tf.data.Dataset.from_tensor_slices(( tf.range(num_observations, name=\"sample_index\") )) training_data =", "__init__( self, fetch_fn, feature_isnonzero, num_observations, num_features, num_design_loc_params, num_design_scale_params, num_loc_params, num_scale_params, constraints_loc: xr.DataArray, constraints_scale:", "= np.where(self.model_vars.converged == False)[0] # ### performance related settings buffer_size = 4 with", "Whether all observations of a feature are zero. 
Features for which this is", "mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=False, update_a=train_a, update_b=train_b, dtype=dtype ) else: batch_fim = None self.X =", "termination_type: str = \"global\", extended_summary=False, noise_model: str = None, dtype=\"float32\" ): \"\"\" :param", "FIM( batched_data=batched_data, sample_indices=sample_indices, constraints_loc=constraints_loc, constraints_scale=constraints_scale, model_vars=model_vars, mode=pkg_constants.HESSIAN_MODE, noise_model=noise_model, iterator=True, update_a=train_a, update_b=train_b, dtype=dtype )", "op_utils.pinv(self.full_data_model.hessians.neg_hessian) # TODO switch for fim? # Summary statistics on feature-wise model gradients:", "= fim_train class BatchedDataModelGraph(BatchedDataModelGraphGLM): \"\"\" Computational graph to evaluate negative binomial GLM metrics" ]
[ ":return: base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else", "prime number in the range [low, high). Returns GMPY2 MPZ integer if available.", "a*x + b*y = gcd. :param num_a: First number a. :param num_b: Second", "high: int) -> int: \"\"\" Generate a random prime number in the range", "(exclusive) of the range. :return: Random prime number. :raise ValueError: the lower bound", "x_old, x_cur, y_old, y_cur = 0, 1, 1, 0 while num_a != 0:", "raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\") return inverse def extended_euclidean(num_a:", ":param low: Lower bound (inclusive) of the range. :param high: Upper bound (exclusive)", "\"\"\" Compute the inverse of a number, given the modulus of the group.", ":param num_a: First number a. :param num_b: Second number b. :return: Tuple containing", "is prime or not \"\"\" if USE_GMPY2: return gmpy2.mpz(number).is_prime() # else return sympy.isprime(number)", "of a under the modulus. \"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value,", "encryption schemes. \"\"\" from math import gcd from typing import Tuple import sympy", "base: base :param exponent: exponent :param modulus: modulus :return: base**exponent % modulus \"\"\"", "int, modulus: int) -> int: \"\"\" Compute base**exponent % modulus. Uses GMPY2 if", "to the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return", "The group modulus. :raise ZeroDivisionError: Raised when the inverse of the value does", "random prime number in the range [low, high). Returns GMPY2 MPZ integer if", "int) -> bool: \"\"\" Check if the input number is a prime number.", "num_b, x_old, y_old def lcm(num_a: int, num_b: int) -> int: \"\"\" Compute the", "GMPY2 if available. :param value: The number to be inverted. :param modulus: The", "num_b: Second number b. 
:return: Tuple containing gcd, x, and y, such that", "int: \"\"\" Compute the inverse of a number, given the modulus of the", "num_a, num_a, num_b % num_a y_old, y_cur = y_cur, y_old - quotient *", "Perform the extended euclidean algorithm on the input numbers. The method returns gcd,", ":raise ValueError: the lower bound should be strictly lower than the upper bound", "0, 1, 1, 0 while num_a != 0: quotient, num_b, num_a = num_b", "is a prime number. Uses GMPY2 if available :param number: The number to", "the upper bound \"\"\" if low >= high: raise ValueError( \"the lower bound", "or equal to the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) #", "import gmpy2 def randprime(low: int, high: int) -> int: \"\"\" Generate a random", "base :param exponent: exponent :param modulus: modulus :return: base**exponent % modulus \"\"\" if", "quotient * y_cur x_old, x_cur = x_cur, x_old - quotient * x_cur return", "number. Uses GMPY2 if available :param number: The number to check :return: Whether", "under the modulus. \"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) #", "method returns gcd, x, y, such that a*x + b*y = gcd. :param", "int) -> int: \"\"\" Compute the least common multiple of two input numbers.", "bound should be smaller or equal to the upper bound\" ) if USE_GMPY2:", "common multiple of a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) #", "input numbers. The method returns gcd, x, y, such that a*x + b*y", "= y_cur, y_old - quotient * y_cur x_old, x_cur = x_cur, x_old -", "* num_b // gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\" Check if", "available. :param num_a: First number a. :param num_b: Second number b. :return: Least", "pow_mod(base: int, exponent: int, modulus: int) -> int: \"\"\" Compute base**exponent % modulus.", "the least common multiple of two input numbers. Uses GMPY2 if available. :param", "Random prime number. 
:raise ValueError: the lower bound should be strictly lower than", "\"\"\" Compute base**exponent % modulus. Uses GMPY2 if available. :param base: base :param", "int, num_b: int) -> Tuple[int, int, int]: \"\"\" Perform the extended euclidean algorithm", "least common multiple of two input numbers. Uses GMPY2 if available. :param num_a:", "\"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a * num_b //", ":param number: The number to check :return: Whether the input is prime or", "Compute the inverse of a number, given the modulus of the group. Note", "ZeroDivisionError: Raised when the inverse of the value does not exist. :return: The", "exist. :return: The inverse of a under the modulus. \"\"\" value %= modulus", "int: \"\"\" Compute the least common multiple of two input numbers. Uses GMPY2", "when the inverse of the value does not exist. :return: The inverse of", "b. :return: Least common multiple of a and b. \"\"\" if USE_GMPY2: return", "y_cur x_old, x_cur = x_cur, x_old - quotient * x_cur return num_b, x_old,", "extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]: \"\"\" Perform the extended euclidean", "range. :param high: Upper bound (exclusive) of the range. :return: Random prime number.", "if low >= high: raise ValueError( \"the lower bound should be smaller or", "._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int, high: int) ->", "should be strictly lower than the upper bound \"\"\" if low >= high:", "modulus: int) -> int: \"\"\" Compute base**exponent % modulus. Uses GMPY2 if available.", "y_old, y_cur = y_cur, y_old - quotient * y_cur x_old, x_cur = x_cur,", "1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\") return inverse def", "modulus. Uses GMPY2 if available. :param base: base :param exponent: exponent :param modulus:", "if available. :param value: The number to be inverted. 
:param modulus: The group", "return num_a * num_b // gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\"", "ValueError( \"the lower bound should be smaller or equal to the upper bound\"", ":raise ZeroDivisionError: Raised when the inverse of the value does not exist. :return:", "creating encryption schemes. \"\"\" from math import gcd from typing import Tuple import", "bound (exclusive) of the range. :return: Random prime number. :raise ValueError: the lower", "upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high)", ":param modulus: The group modulus. :raise ZeroDivisionError: Raised when the inverse of the", "Generate a random prime number in the range [low, high). Returns GMPY2 MPZ", ":return: Tuple containing gcd, x, and y, such that a*x + b*y =", "def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]: \"\"\" Perform the extended", "import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int, high: int) -> int:", "y_old def lcm(num_a: int, num_b: int) -> int: \"\"\" Compute the least common", "GMPY2 if available. :param num_a: First number a. :param num_b: Second number b.", "# else gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_ != 1: raise", "y_cur, y_old - quotient * y_cur x_old, x_cur = x_cur, x_old - quotient", "on the input numbers. The method returns gcd, x, y, such that a*x", "gcd. :param num_a: First number a. :param num_b: Second number b. :return: Tuple", "import Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def", "Whether the input is prime or not \"\"\" if USE_GMPY2: return gmpy2.mpz(number).is_prime() #", "\"\"\" Generate a random prime number in the range [low, high). Returns GMPY2", "range [low, high). Returns GMPY2 MPZ integer if available. 
:param low: Lower bound", "gmpy2.invert(value, modulus) # else gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_ !=", "not exist. :return: The inverse of a under the modulus. \"\"\" value %=", "a*x + b*y = gcd x_old, x_cur, y_old, y_cur = 0, 1, 1,", "inverse of the value does not exist. :return: The inverse of a under", "Lower bound (inclusive) of the range. :param high: Upper bound (exclusive) of the", "= x_cur, x_old - quotient * x_cur return num_b, x_old, y_old def lcm(num_a:", "exponent: int, modulus: int) -> int: \"\"\" Compute base**exponent % modulus. Uses GMPY2", "the value does not exist. :return: The inverse of a under the modulus.", "exponent: exponent :param modulus: modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2: return", "of {value} mod {modulus} does not exist.\") return inverse def extended_euclidean(num_a: int, num_b:", "be smaller or equal to the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low,", "of the value does not exist. :return: The inverse of a under the", "number a. :param num_b: Second number b. :return: Least common multiple of a", "the range [low, high). Returns GMPY2 MPZ integer if available. :param low: Lower", "% modulus. Uses GMPY2 if available. :param base: base :param exponent: exponent :param", "to check :return: Whether the input is prime or not \"\"\" if USE_GMPY2:", ":param modulus: modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent,", "prime number. Uses GMPY2 if available :param number: The number to check :return:", "Useful functions for creating encryption schemes. \"\"\" from math import gcd from typing", "a prime number. Uses GMPY2 if available :param number: The number to check", "two input numbers. Uses GMPY2 if available. :param num_a: First number a. 
:param", "int, modulus: int) -> int: \"\"\" Compute the inverse of a number, given", "b*y = gcd x_old, x_cur, y_old, y_cur = 0, 1, 1, 0 while", "a number, given the modulus of the group. Note that the inverse might", "modulus) # else return pow(base, exponent, modulus) def mod_inv(value: int, modulus: int) ->", "strictly lower than the upper bound \"\"\" if low >= high: raise ValueError(", "raise ValueError( \"the lower bound should be smaller or equal to the upper", "% modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return pow(base,", "num_b // gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\" Check if the", "\"\"\" # a*x + b*y = gcd x_old, x_cur, y_old, y_cur = 0,", "exponent :param modulus: modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base,", "# a*x + b*y = gcd x_old, x_cur, y_old, y_cur = 0, 1,", "of two input numbers. Uses GMPY2 if available. :param num_a: First number a.", "Compute base**exponent % modulus. Uses GMPY2 if available. :param base: base :param exponent:", "the inverse of the value does not exist. :return: The inverse of a", "common multiple of two input numbers. Uses GMPY2 if available. :param num_a: First", "gcd. \"\"\" # a*x + b*y = gcd x_old, x_cur, y_old, y_cur =", "be strictly lower than the upper bound \"\"\" if low >= high: raise", "value: The number to be inverted. :param modulus: The group modulus. :raise ZeroDivisionError:", "gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus:", "extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does", "-> int: \"\"\" Compute base**exponent % modulus. Uses GMPY2 if available. :param base:", "to be inverted. :param modulus: The group modulus. 
:raise ZeroDivisionError: Raised when the", "mod_inv(value: int, modulus: int) -> int: \"\"\" Compute the inverse of a number,", "containing gcd, x, and y, such that a*x + b*y = gcd. \"\"\"", "number b. :return: Tuple containing gcd, x, and y, such that a*x +", "of a number, given the modulus of the group. Note that the inverse", "[low, high). Returns GMPY2 MPZ integer if available. :param low: Lower bound (inclusive)", "of the group. Note that the inverse might not exist. Uses GMPY2 if", "the modulus of the group. Note that the inverse might not exist. Uses", "num_a: First number a. :param num_b: Second number b. :return: Tuple containing gcd,", "\"\"\" Useful functions for creating encryption schemes. \"\"\" from math import gcd from", "b. :return: Tuple containing gcd, x, and y, such that a*x + b*y", "exist. Uses GMPY2 if available. :param value: The number to be inverted. :param", "num_a = num_b // num_a, num_a, num_b % num_a y_old, y_cur = y_cur,", "inverted. :param modulus: The group modulus. :raise ZeroDivisionError: Raised when the inverse of", "return pow(base, exponent, modulus) def mod_inv(value: int, modulus: int) -> int: \"\"\" Compute", "the inverse might not exist. Uses GMPY2 if available. :param value: The number", "if the input number is a prime number. Uses GMPY2 if available :param", "ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\") return inverse def extended_euclidean(num_a: int,", "num_b: Second number b. :return: Least common multiple of a and b. \"\"\"", "Uses GMPY2 if available. :param value: The number to be inverted. :param modulus:", "modulus) def mod_inv(value: int, modulus: int) -> int: \"\"\" Compute the inverse of", "{modulus} does not exist.\") return inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int,", "int, int]: \"\"\" Perform the extended euclidean algorithm on the input numbers. 
The", "int) -> Tuple[int, int, int]: \"\"\" Perform the extended euclidean algorithm on the", "!= 0: quotient, num_b, num_a = num_b // num_a, num_a, num_b % num_a", "Second number b. :return: Tuple containing gcd, x, and y, such that a*x", "USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int, high: int) -> int: \"\"\"", "def mod_inv(value: int, modulus: int) -> int: \"\"\" Compute the inverse of a", "inverse of a under the modulus. \"\"\" value %= modulus if USE_GMPY2: return", "(inclusive) of the range. :param high: Upper bound (exclusive) of the range. :return:", "return gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent, modulus) def mod_inv(value: int,", "bound should be strictly lower than the upper bound \"\"\" if low >=", "modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse, _ = extended_euclidean(value,", "of the range. :return: Random prime number. :raise ValueError: the lower bound should", "y_old, y_cur = 0, 1, 1, 0 while num_a != 0: quotient, num_b,", ">= high: raise ValueError( \"the lower bound should be smaller or equal to", "a. :param num_b: Second number b. :return: Least common multiple of a and", "sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus: int) -> int: \"\"\" Compute", "def pow_mod(base: int, exponent: int, modulus: int) -> int: \"\"\" Compute base**exponent %", "base**exponent % modulus. Uses GMPY2 if available. :param base: base :param exponent: exponent", "1, 1, 0 while num_a != 0: quotient, num_b, num_a = num_b //", "Raised when the inverse of the value does not exist. :return: The inverse", "+ b*y = gcd x_old, x_cur, y_old, y_cur = 0, 1, 1, 0", "a. :param num_b: Second number b. :return: Tuple containing gcd, x, and y,", "numbers. Uses GMPY2 if available. :param num_a: First number a. 
:param num_b: Second", "equal to the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else", "returns gcd, x, y, such that a*x + b*y = gcd. :param num_a:", "num_b // num_a, num_a, num_b % num_a y_old, y_cur = y_cur, y_old -", "-> int: \"\"\" Compute the inverse of a number, given the modulus of", "\"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse,", "y_cur = 0, 1, 1, 0 while num_a != 0: quotient, num_b, num_a", "modulus: The group modulus. :raise ZeroDivisionError: Raised when the inverse of the value", "_ = extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod", "available :param number: The number to check :return: Whether the input is prime", "the input numbers. The method returns gcd, x, y, such that a*x +", "such that a*x + b*y = gcd. \"\"\" # a*x + b*y =", "multiple of two input numbers. Uses GMPY2 if available. :param num_a: First number", "the range. :return: Random prime number. :raise ValueError: the lower bound should be", "functions for creating encryption schemes. \"\"\" from math import gcd from typing import", "num_a: First number a. :param num_b: Second number b. :return: Least common multiple", "upper bound \"\"\" if low >= high: raise ValueError( \"the lower bound should", "such that a*x + b*y = gcd. :param num_a: First number a. :param", "available. :param value: The number to be inverted. :param modulus: The group modulus.", "if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a * num_b // gcd(num_a,", "high: raise ValueError( \"the lower bound should be smaller or equal to the", ":param num_b: Second number b. :return: Least common multiple of a and b.", "of the range. :param high: Upper bound (exclusive) of the range. :return: Random", ":return: Whether the input is prime or not \"\"\" if USE_GMPY2: return gmpy2.mpz(number).is_prime()", "integer if available. 
:param low: Lower bound (inclusive) of the range. :param high:", "that a*x + b*y = gcd. \"\"\" # a*x + b*y = gcd", "bound \"\"\" if low >= high: raise ValueError( \"the lower bound should be", "int: \"\"\" Generate a random prime number in the range [low, high). Returns", "not exist. Uses GMPY2 if available. :param value: The number to be inverted.", "0: quotient, num_b, num_a = num_b // num_a, num_a, num_b % num_a y_old,", "gcd, x, y, such that a*x + b*y = gcd. :param num_a: First", "if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\")", ":return: Least common multiple of a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a,", "Uses GMPY2 if available :param number: The number to check :return: Whether the", "x_old, y_old def lcm(num_a: int, num_b: int) -> int: \"\"\" Compute the least", "number in the range [low, high). Returns GMPY2 MPZ integer if available. :param", "number b. :return: Least common multiple of a and b. \"\"\" if USE_GMPY2:", "number a. :param num_b: Second number b. :return: Tuple containing gcd, x, and", "USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent, modulus) def mod_inv(value:", "does not exist. :return: The inverse of a under the modulus. \"\"\" value", "lcm(num_a: int, num_b: int) -> int: \"\"\" Compute the least common multiple of", "Uses GMPY2 if available. :param base: base :param exponent: exponent :param modulus: modulus", "modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) #", "sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int, high:", "not exist.\") return inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]:", "a random prime number in the range [low, high). 
Returns GMPY2 MPZ integer", "GMPY2 if available :param number: The number to check :return: Whether the input", "modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not", "gcd from typing import Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2:", "lower than the upper bound \"\"\" if low >= high: raise ValueError( \"the", "num_b % num_a y_old, y_cur = y_cur, y_old - quotient * y_cur x_old,", ":param num_b: Second number b. :return: Tuple containing gcd, x, and y, such", "be inverted. :param modulus: The group modulus. :raise ZeroDivisionError: Raised when the inverse", "than the upper bound \"\"\" if low >= high: raise ValueError( \"the lower", "modulus. :raise ZeroDivisionError: Raised when the inverse of the value does not exist.", "= extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus}", "Least common multiple of a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b)", "\"the lower bound should be smaller or equal to the upper bound\" )", "pow(base, exponent, modulus) def mod_inv(value: int, modulus: int) -> int: \"\"\" Compute the", "modulus of the group. Note that the inverse might not exist. Uses GMPY2", "1, 0 while num_a != 0: quotient, num_b, num_a = num_b // num_a,", "-> bool: \"\"\" Check if the input number is a prime number. Uses", "import gcd from typing import Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if", "# else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus: int) ->", "The number to check :return: Whether the input is prime or not \"\"\"", "gcd x_old, x_cur, y_old, y_cur = 0, 1, 1, 0 while num_a !=", "input numbers. Uses GMPY2 if available. :param num_a: First number a. 
:param num_b:", "// gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\" Check if the input", "smaller or equal to the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high))", "input is prime or not \"\"\" if USE_GMPY2: return gmpy2.mpz(number).is_prime() # else return", "return inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]: \"\"\" Perform", "Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low:", "high) def pow_mod(base: int, exponent: int, modulus: int) -> int: \"\"\" Compute base**exponent", "USE_GMPY2: import gmpy2 def randprime(low: int, high: int) -> int: \"\"\" Generate a", "from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int, high: int)", "the modulus. \"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else", "-> int: \"\"\" Compute the least common multiple of two input numbers. Uses", "num_b) def is_prime(number: int) -> bool: \"\"\" Check if the input number is", "\"\"\" Compute the least common multiple of two input numbers. Uses GMPY2 if", "= gcd x_old, x_cur, y_old, y_cur = 0, 1, 1, 0 while num_a", "for creating encryption schemes. \"\"\" from math import gcd from typing import Tuple", ":return: The inverse of a under the modulus. \"\"\" value %= modulus if", "that a*x + b*y = gcd. :param num_a: First number a. :param num_b:", "x, and y, such that a*x + b*y = gcd. \"\"\" # a*x", "\"\"\" Perform the extended euclidean algorithm on the input numbers. The method returns", "number: The number to check :return: Whether the input is prime or not", "the inverse of a number, given the modulus of the group. Note that", "extended euclidean algorithm on the input numbers. The method returns gcd, x, y,", "number, given the modulus of the group. 
Note that the inverse might not", "exponent, modulus) # else return pow(base, exponent, modulus) def mod_inv(value: int, modulus: int)", "0 while num_a != 0: quotient, num_b, num_a = num_b // num_a, num_a,", "%= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse, _ =", "return gmpy2.lcm(num_a, num_b) # else return num_a * num_b // gcd(num_a, num_b) def", "value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse, _", "if available. :param base: base :param exponent: exponent :param modulus: modulus :return: base**exponent", "x_cur, x_old - quotient * x_cur return num_b, x_old, y_old def lcm(num_a: int,", "lower bound should be strictly lower than the upper bound \"\"\" if low", "return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus: int) -> int: \"\"\"", "num_b) # else return num_a * num_b // gcd(num_a, num_b) def is_prime(number: int)", "gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent, modulus) def mod_inv(value: int, modulus:", "Tuple containing gcd, x, and y, such that a*x + b*y = gcd.", "-> Tuple[int, int, int]: \"\"\" Perform the extended euclidean algorithm on the input", "if USE_GMPY2: import gmpy2 def randprime(low: int, high: int) -> int: \"\"\" Generate", "int, high: int) -> int: \"\"\" Generate a random prime number in the", "if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int,", "gmpy2.lcm(num_a, num_b) # else return num_a * num_b // gcd(num_a, num_b) def is_prime(number:", "num_b: int) -> Tuple[int, int, int]: \"\"\" Perform the extended euclidean algorithm on", "modulus: modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus)", "First number a. :param num_b: Second number b. 
:return: Tuple containing gcd, x,", "base**exponent % modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return", "group modulus. :raise ZeroDivisionError: Raised when the inverse of the value does not", "number to check :return: Whether the input is prime or not \"\"\" if", "y_old - quotient * y_cur x_old, x_cur = x_cur, x_old - quotient *", "+ b*y = gcd. \"\"\" # a*x + b*y = gcd x_old, x_cur,", "high). Returns GMPY2 MPZ integer if available. :param low: Lower bound (inclusive) of", "and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a *", "if available. :param low: Lower bound (inclusive) of the range. :param high: Upper", "available. :param base: base :param exponent: exponent :param modulus: modulus :return: base**exponent %", "exponent, modulus) def mod_inv(value: int, modulus: int) -> int: \"\"\" Compute the inverse", "if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent, modulus) def", "Tuple[int, int, int]: \"\"\" Perform the extended euclidean algorithm on the input numbers.", "schemes. \"\"\" from math import gcd from typing import Tuple import sympy from", "# else return pow(base, exponent, modulus) def mod_inv(value: int, modulus: int) -> int:", "quotient * x_cur return num_b, x_old, y_old def lcm(num_a: int, num_b: int) ->", "of a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return", "return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int,", "def lcm(num_a: int, num_b: int) -> int: \"\"\" Compute the least common multiple", "First number a. :param num_b: Second number b. :return: Least common multiple of", "def is_prime(number: int) -> bool: \"\"\" Check if the input number is a", "Note that the inverse might not exist. Uses GMPY2 if available. 
:param value:", "x_cur = x_cur, x_old - quotient * x_cur return num_b, x_old, y_old def", "MPZ integer if available. :param low: Lower bound (inclusive) of the range. :param", "return num_b, x_old, y_old def lcm(num_a: int, num_b: int) -> int: \"\"\" Compute", "bool: \"\"\" Check if the input number is a prime number. Uses GMPY2", "number. :raise ValueError: the lower bound should be strictly lower than the upper", "USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent:", "int: \"\"\" Compute base**exponent % modulus. Uses GMPY2 if available. :param base: base", "num_a y_old, y_cur = y_cur, y_old - quotient * y_cur x_old, x_cur =", "x_cur, y_old, y_cur = 0, 1, 1, 0 while num_a != 0: quotient,", "should be smaller or equal to the upper bound\" ) if USE_GMPY2: return", "The number to be inverted. :param modulus: The group modulus. :raise ZeroDivisionError: Raised", "% num_a y_old, y_cur = y_cur, y_old - quotient * y_cur x_old, x_cur", "modulus) # else gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_ != 1:", "a*x + b*y = gcd. \"\"\" # a*x + b*y = gcd x_old,", "x, y, such that a*x + b*y = gcd. :param num_a: First number", "that the inverse might not exist. Uses GMPY2 if available. :param value: The", "is_prime(number: int) -> bool: \"\"\" Check if the input number is a prime", "* x_cur return num_b, x_old, y_old def lcm(num_a: int, num_b: int) -> int:", "modulus. \"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_,", "b*y = gcd. :param num_a: First number a. :param num_b: Second number b.", "= num_b // num_a, num_a, num_b % num_a y_old, y_cur = y_cur, y_old", "gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\" Check if the input number", "algorithm on the input numbers. The method returns gcd, x, y, such that", "group. Note that the inverse might not exist. Uses GMPY2 if available. 
:param", "Upper bound (exclusive) of the range. :return: Random prime number. :raise ValueError: the", "multiple of a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else", "USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse, _ = extended_euclidean(value, modulus) if", "= gcd. \"\"\" # a*x + b*y = gcd x_old, x_cur, y_old, y_cur", "else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus: int) -> int:", "GMPY2 MPZ integer if available. :param low: Lower bound (inclusive) of the range.", "else gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse", "int) -> int: \"\"\" Generate a random prime number in the range [low,", "{value} mod {modulus} does not exist.\") return inverse def extended_euclidean(num_a: int, num_b: int)", "else return num_a * num_b // gcd(num_a, num_b) def is_prime(number: int) -> bool:", "gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\") return", ") if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base:", "\"\"\" Check if the input number is a prime number. Uses GMPY2 if", "y_cur = y_cur, y_old - quotient * y_cur x_old, x_cur = x_cur, x_old", "low: Lower bound (inclusive) of the range. :param high: Upper bound (exclusive) of", "Compute the least common multiple of two input numbers. Uses GMPY2 if available.", "modulus: int) -> int: \"\"\" Compute the inverse of a number, given the", "- quotient * x_cur return num_b, x_old, y_old def lcm(num_a: int, num_b: int)", "math import gcd from typing import Tuple import sympy from ._check_gmpy2 import USE_GMPY2", "mod {modulus} does not exist.\") return inverse def extended_euclidean(num_a: int, num_b: int) ->", "int, exponent: int, modulus: int) -> int: \"\"\" Compute base**exponent % modulus. 
Uses", "int, num_b: int) -> int: \"\"\" Compute the least common multiple of two", "exist.\") return inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]: \"\"\"", "number is a prime number. Uses GMPY2 if available :param number: The number", "= gcd. :param num_a: First number a. :param num_b: Second number b. :return:", "gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of", "\"\"\" if low >= high: raise ValueError( \"the lower bound should be smaller", "a and b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a", "given the modulus of the group. Note that the inverse might not exist.", "inverse, _ = extended_euclidean(value, modulus) if gcd_ != 1: raise ZeroDivisionError(f\"Inverse of {value}", "USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a * num_b // gcd(num_a, num_b)", "num_b, num_a = num_b // num_a, num_a, num_b % num_a y_old, y_cur =", "x_cur return num_b, x_old, y_old def lcm(num_a: int, num_b: int) -> int: \"\"\"", "from math import gcd from typing import Tuple import sympy from ._check_gmpy2 import", "the input number is a prime number. Uses GMPY2 if available :param number:", "- quotient * y_cur x_old, x_cur = x_cur, x_old - quotient * x_cur", "inverse might not exist. Uses GMPY2 if available. :param value: The number to", "value does not exist. :return: The inverse of a under the modulus. \"\"\"", "in the range [low, high). Returns GMPY2 MPZ integer if available. :param low:", "else return pow(base, exponent, modulus) def mod_inv(value: int, modulus: int) -> int: \"\"\"", "num_b: int) -> int: \"\"\" Compute the least common multiple of two input", "num_a * num_b // gcd(num_a, num_b) def is_prime(number: int) -> bool: \"\"\" Check", "inverse of a number, given the modulus of the group. Note that the", "Second number b. :return: Least common multiple of a and b. 
\"\"\" if", "while num_a != 0: quotient, num_b, num_a = num_b // num_a, num_a, num_b", ":return: Random prime number. :raise ValueError: the lower bound should be strictly lower", "if available :param number: The number to check :return: Whether the input is", ":param base: base :param exponent: exponent :param modulus: modulus :return: base**exponent % modulus", "prime number. :raise ValueError: the lower bound should be strictly lower than the", "randprime(low: int, high: int) -> int: \"\"\" Generate a random prime number in", ":param num_a: First number a. :param num_b: Second number b. :return: Least common", "num_a, num_b % num_a y_old, y_cur = y_cur, y_old - quotient * y_cur", "// num_a, num_a, num_b % num_a y_old, y_cur = y_cur, y_old - quotient", "* y_cur x_old, x_cur = x_cur, x_old - quotient * x_cur return num_b,", "available. :param low: Lower bound (inclusive) of the range. :param high: Upper bound", "check :return: Whether the input is prime or not \"\"\" if USE_GMPY2: return", "lower bound should be smaller or equal to the upper bound\" ) if", ":param exponent: exponent :param modulus: modulus :return: base**exponent % modulus \"\"\" if USE_GMPY2:", "\"\"\" from math import gcd from typing import Tuple import sympy from ._check_gmpy2", "gcd, x, and y, such that a*x + b*y = gcd. \"\"\" #", "The method returns gcd, x, y, such that a*x + b*y = gcd.", "modulus \"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent,", "the group. Note that the inverse might not exist. Uses GMPY2 if available.", "-> int: \"\"\" Generate a random prime number in the range [low, high).", "euclidean algorithm on the input numbers. The method returns gcd, x, y, such", "high)) # else return sympy.ntheory.generate.randprime(low, high) def pow_mod(base: int, exponent: int, modulus: int)", "return gmpy2.invert(value, modulus) # else gcd_, inverse, _ = extended_euclidean(value, modulus) if gcd_", "numbers. 
The method returns gcd, x, y, such that a*x + b*y =", "b. \"\"\" if USE_GMPY2: return gmpy2.lcm(num_a, num_b) # else return num_a * num_b", "if USE_GMPY2: return gmpy2.invert(value, modulus) # else gcd_, inverse, _ = extended_euclidean(value, modulus)", "b*y = gcd. \"\"\" # a*x + b*y = gcd x_old, x_cur, y_old,", "!= 1: raise ZeroDivisionError(f\"Inverse of {value} mod {modulus} does not exist.\") return inverse", "quotient, num_b, num_a = num_b // num_a, num_a, num_b % num_a y_old, y_cur", "int) -> int: \"\"\" Compute base**exponent % modulus. Uses GMPY2 if available. :param", "= 0, 1, 1, 0 while num_a != 0: quotient, num_b, num_a =", "bound (inclusive) of the range. :param high: Upper bound (exclusive) of the range.", "the upper bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low,", "import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2 def randprime(low: int,", "+ b*y = gcd. :param num_a: First number a. :param num_b: Second number", "x_old, x_cur = x_cur, x_old - quotient * x_cur return num_b, x_old, y_old", "inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int, int]: \"\"\" Perform the", "GMPY2 if available. :param base: base :param exponent: exponent :param modulus: modulus :return:", "a under the modulus. \"\"\" value %= modulus if USE_GMPY2: return gmpy2.invert(value, modulus)", "the lower bound should be strictly lower than the upper bound \"\"\" if", "Returns GMPY2 MPZ integer if available. :param low: Lower bound (inclusive) of the", "and y, such that a*x + b*y = gcd. \"\"\" # a*x +", "int]: \"\"\" Perform the extended euclidean algorithm on the input numbers. The method", "high: Upper bound (exclusive) of the range. :return: Random prime number. :raise ValueError:", ":param value: The number to be inverted. :param modulus: The group modulus. 
:raise", "from typing import Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import", "bound\" ) if USE_GMPY2: return gmpy2.mpz(sympy.ntheory.generate.randprime(low, high)) # else return sympy.ntheory.generate.randprime(low, high) def", "range. :return: Random prime number. :raise ValueError: the lower bound should be strictly", "gmpy2 def randprime(low: int, high: int) -> int: \"\"\" Generate a random prime", "low >= high: raise ValueError( \"the lower bound should be smaller or equal", "ValueError: the lower bound should be strictly lower than the upper bound \"\"\"", "y, such that a*x + b*y = gcd. :param num_a: First number a.", "The inverse of a under the modulus. \"\"\" value %= modulus if USE_GMPY2:", "def randprime(low: int, high: int) -> int: \"\"\" Generate a random prime number", "int) -> int: \"\"\" Compute the inverse of a number, given the modulus", "number to be inverted. :param modulus: The group modulus. :raise ZeroDivisionError: Raised when", "if available. :param num_a: First number a. :param num_b: Second number b. :return:", "Check if the input number is a prime number. Uses GMPY2 if available", "Uses GMPY2 if available. :param num_a: First number a. :param num_b: Second number", "the input is prime or not \"\"\" if USE_GMPY2: return gmpy2.mpz(number).is_prime() # else", "typing import Tuple import sympy from ._check_gmpy2 import USE_GMPY2 if USE_GMPY2: import gmpy2", ":param high: Upper bound (exclusive) of the range. :return: Random prime number. :raise", "the extended euclidean algorithm on the input numbers. The method returns gcd, x,", "y, such that a*x + b*y = gcd. \"\"\" # a*x + b*y", "# else return num_a * num_b // gcd(num_a, num_b) def is_prime(number: int) ->", "might not exist. Uses GMPY2 if available. 
:param value: The number to be", "num_a != 0: quotient, num_b, num_a = num_b // num_a, num_a, num_b %", "\"\"\" if USE_GMPY2: return gmpy2.powmod(base, exponent, modulus) # else return pow(base, exponent, modulus)", "input number is a prime number. Uses GMPY2 if available :param number: The", "the range. :param high: Upper bound (exclusive) of the range. :return: Random prime", "x_old - quotient * x_cur return num_b, x_old, y_old def lcm(num_a: int, num_b:", "does not exist.\") return inverse def extended_euclidean(num_a: int, num_b: int) -> Tuple[int, int," ]
[ "OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from", "getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x,", "errors_count of this BulkCreateResponse. The number of enrollment identities with failed processing. :return:", "status): \"\"\" Sets the status of this BulkCreateResponse. The state of the process", "str \"\"\" if account_id is None: raise ValueError(\"Invalid value for `account_id`, must not", "'processing'. When the request has been fully processed, the state changes to 'completed'.", "and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must be a follow", "BulkCreateResponse. :type: int \"\"\" if errors_count is None: raise ValueError(\"Invalid value for `errors_count`,", "this BulkCreateResponse. :type: int \"\"\" if processed_count is None: raise ValueError(\"Invalid value for", "(dict): The key is attribute name and the value is attribute type. attribute_map", "allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError( \"Invalid value for", "BulkCreateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns true", "allowed_values = [\"new\", \"processing\", \"completed\"] if status not in allowed_values: raise ValueError( \"Invalid", "= processed_count @property def status(self): \"\"\" Gets the status of this BulkCreateResponse. The", "of receiving the bulk creation task. :param created_at: The created_at of this BulkCreateResponse.", "BulkCreateResponse. etag :param etag: The etag of this BulkCreateResponse. :type: str \"\"\" if", "true if both objects are not equal \"\"\" return not self == other", "of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\"", "creation task. 
:return: The completed_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at", "x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] =", "'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map = {", "hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item:", "`/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\" Gets the full_report_file of this", "this BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str \"\"\" self._errors_report_file", "The created_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self,", "this BulkCreateResponse. :type: int \"\"\" if total_count is None: raise ValueError(\"Invalid value for", "errors_report_file of this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\"", "return self._object @object.setter def object(self, object): \"\"\" Sets the object of this BulkCreateResponse.", ")) else: result[attr] = value return result def to_str(self): \"\"\" Returns the string", "from six import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This class is", "Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at = created_at self._errors_count =", "@property def errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse. 
The number of", "str \"\"\" if status is None: raise ValueError(\"Invalid value for `status`, must not", "pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\" Gets", "self._account_id = account_id self._completed_at = completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file", "market (OEM dealer) or a device transferred from an account to another. More", "iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated by", "None: raise ValueError(\"Invalid value for `status`, must not be `None`\") allowed_values = [\"new\",", "@property def processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse. The number of", "be `None`\") self._account_id = account_id @property def completed_at(self): \"\"\" Gets the completed_at of", "for `status` ({0}), must be one of {1}\" .format(status, allowed_values) ) self._status =", "\"\"\" if id is None: raise ValueError(\"Invalid value for `id`, must not be", "for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr]", "def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse. :param errors_report_file: The", "Gets the processed_count of this BulkCreateResponse. The number of enrollment identities processed until", "total_count is None: raise ValueError(\"Invalid value for `total_count`, must not be `None`\") self._total_count", "for `processed_count`, must not be `None`\") self._processed_count = processed_count @property def status(self): \"\"\"", "of this BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype: str \"\"\" return", "list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value,", "etag of this BulkCreateResponse. 
:type: str \"\"\" if etag is None: raise ValueError(\"Invalid", "\"\"\" Returns the string representation of the model \"\"\" return pformat(self.to_dict()) def __repr__(self):", "in the input CSV. :param total_count: The total_count of this BulkCreateResponse. :type: int", "'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None,", "= completed_at @property def created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse. The", "this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets", "'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str',", "progress, the state is shown as 'processing'. When the request has been fully", "etag of this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter def etag(self, etag):", "The id of this BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self,", "\"\"\" Sets the processed_count of this BulkCreateResponse. The number of enrollment identities processed", "from the open market (OEM dealer) or a device transferred from an account", "in definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count':", "allowed_values) ) self._status = status @property def total_count(self): \"\"\" Gets the total_count of", "must not be `None`\") if id is not None and not re.search('^[A-Za-z0-9]{32}', id):", "Service allows users to claim the ownership of a device which is not", "= object self._processed_count = processed_count self._status = status self._total_count = total_count self.discriminator =", "the object of this BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype: str", "changes to 'completed'. 
:param status: The status of this BulkCreateResponse. :type: str \"\"\"", "self._status @status.setter def status(self, status): \"\"\" Sets the status of this BulkCreateResponse. The", "BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype: str \"\"\" return self._object @object.setter", "BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the", "`etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag", "follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\"", "The number of enrollment identities with failed processing. :param errors_count: The errors_count of", "`status` ({0}), must be one of {1}\" .format(status, allowed_values) ) self._status = status", "of this BulkCreateResponse. etag :param etag: The etag of this BulkCreateResponse. :type: str", "isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns", "this BulkCreateResponse. The state of the process is 'new' at the time of", "processed_count is None: raise ValueError(\"Invalid value for `processed_count`, must not be `None`\") self._processed_count", "is None: raise ValueError(\"Invalid value for `account_id`, must not be `None`\") self._account_id =", "\"\"\" Sets the full_report_file of this BulkCreateResponse. :param full_report_file: The full_report_file of this", "for `object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in", "@status.setter def status(self, status): \"\"\" Sets the status of this BulkCreateResponse. The state", "ValueError(\"Invalid value for `id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\")", "full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse. 
:param full_report_file: The full_report_file", "as a dict \"\"\" result = {} for attr, _ in iteritems(self.swagger_types): value", "ownership of a device which is not yet assigned to an account. A", "created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file = full_report_file", "defined in Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at = created_at", "not in allowed_values: raise ValueError( \"Invalid value for `object` ({0}), must be one", "Sets the object of this BulkCreateResponse. :param object: The object of this BulkCreateResponse.", "one of {1}\" .format(object, allowed_values) ) self._object = object @property def processed_count(self): \"\"\"", "= [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError( \"Invalid value for `object`", "id): raise ValueError(\"Invalid value for `id`, must be a follow pattern or equal", "an account. A device without an assigned account can be a device purchased", "object of this BulkCreateResponse. :param object: The object of this BulkCreateResponse. :type: str", "self._total_count = total_count self.discriminator = None @property def account_id(self): \"\"\" Gets the account_id", "'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status',", "this BulkCreateResponse. etag :param etag: The etag of this BulkCreateResponse. :type: str \"\"\"", "shown as 'processing'. When the request has been fully processed, the state changes", "The account_id of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self,", "be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def", "this BulkCreateResponse. 
:type: str \"\"\" if object is None: raise ValueError(\"Invalid value for", "the time of creation. If the creation is still in progress, the state", "dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item,", "is attribute name and the value is attribute type. attribute_map (dict): The key", "First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import", "both objects are equal \"\"\" if not isinstance(other, BulkCreateResponse): return False return self.__dict__", "total_count of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count):", "by the swagger code generator program. Do not edit the class manually. \"\"\"", "= value return result def to_str(self): \"\"\" Returns the string representation of the", "attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] =", "is None: raise ValueError(\"Invalid value for `status`, must not be `None`\") allowed_values =", "None: raise ValueError(\"Invalid value for `etag`, must not be `None`\") if etag is", "processed until now. :return: The processed_count of this BulkCreateResponse. :rtype: int \"\"\" return", "\"\"\" Gets the completed_at of this BulkCreateResponse. The time of completing the bulk", "Gets the object of this BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype:", "if created_at is None: raise ValueError(\"Invalid value for `created_at`, must not be `None`\")", "this BulkCreateResponse. Bulk ID :param id: The id of this BulkCreateResponse. :type: str", "@errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse. 
:param errors_report_file:", ":param id: The id of this BulkCreateResponse. :type: str \"\"\" if id is", "this BulkCreateResponse. Bulk ID :return: The id of this BulkCreateResponse. :rtype: str \"\"\"", "processed, the state changes to 'completed'. :return: The status of this BulkCreateResponse. :rtype:", "BulkCreateResponse. :type: str \"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets the", "of this BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\"", "the account_id of this BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse. :rtype:", "number of enrollment identities found in the input CSV. :return: The total_count of", "of enrollment identities found in the input CSV. :return: The total_count of this", "errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file", "not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is", "receiving the bulk creation task. :param created_at: The created_at of this BulkCreateResponse. :type:", "the status of this BulkCreateResponse. The state of the process is 'new' at", "\"\"\" Gets the total_count of this BulkCreateResponse. Total number of enrollment identities found", "code generator program. Do not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types", "identities with failed processing. :param errors_count: The errors_count of this BulkCreateResponse. :type: int", "created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse. The time of receiving the", "processed_count(self, processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse. 
The number of enrollment", "`None`\") if id is not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value", ":rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\" Sets the id", "state is shown as 'processing'. When the request has been fully processed, the", "assigned account can be a device purchased from the open market (OEM dealer)", ":param errors_count: The errors_count of this BulkCreateResponse. :type: int \"\"\" if errors_count is", "raise ValueError(\"Invalid value for `id`, must be a follow pattern or equal to", "= id self._object = object self._processed_count = processed_count self._status = status self._total_count =", "account_id of this BulkCreateResponse. :type: str \"\"\" if account_id is None: raise ValueError(\"Invalid", "is json key in definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime',", "A device without an assigned account can be a device purchased from the", "for `account_id`, must not be `None`\") self._account_id = account_id @property def completed_at(self): \"\"\"", "of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\"", "this BulkCreateResponse. The time of completing the bulk creation task. :param completed_at: The", "the object of this BulkCreateResponse. :param object: The object of this BulkCreateResponse. :type:", "Bulk ID :return: The id of this BulkCreateResponse. :rtype: str \"\"\" return self._id", "name and the value is json key in definition. \"\"\" swagger_types = {", "If the creation is still in progress, the state is shown as 'processing'.", "'new' at the time of creation. If the creation is still in progress,", "ID :return: The account_id of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter", "(dict): The key is attribute name and the value is json key in", "this BulkCreateResponse. 
The number of enrollment identities with failed processing. :return: The errors_count", "The created_at of this BulkCreateResponse. :type: datetime \"\"\" if created_at is None: raise", "@property def total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse. Total number of", "@account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id of this BulkCreateResponse. ID :param", "int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count of", "fully processed, the state changes to 'completed'. :param status: The status of this", "object of this BulkCreateResponse. :type: str \"\"\" if object is None: raise ValueError(\"Invalid", "BulkCreateResponse. :param full_report_file: The full_report_file of this BulkCreateResponse. :type: str \"\"\" self._full_report_file =", "'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count'", "created_at of this BulkCreateResponse. :type: datetime \"\"\" if created_at is None: raise ValueError(\"Invalid", "raise ValueError( \"Invalid value for `status` ({0}), must be one of {1}\" .format(status,", "input CSV. :return: The total_count of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count", "Enrollment Service allows users to claim the ownership of a device which is", "`None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns the model properties as a", "} def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None,", "must not be `None`\") if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag):", "'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map = { 'account_id':", "completed_at of this BulkCreateResponse. 
The time of completing the bulk creation task. :return:", "'int', 'status': 'str', 'total_count': 'int' } attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at',", "Gets the account_id of this BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse.", "ValueError(\"Invalid value for `status`, must not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"]", "def account_id(self, account_id): \"\"\" Sets the account_id of this BulkCreateResponse. ID :param account_id:", "'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def", "'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str',", "value for `account_id`, must not be `None`\") self._account_id = account_id @property def completed_at(self):", "etag: The etag of this BulkCreateResponse. :type: str \"\"\" if etag is None:", "raise ValueError(\"Invalid value for `status`, must not be `None`\") allowed_values = [\"new\", \"processing\",", "BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the", "of this BulkCreateResponse. :param full_report_file: The full_report_file of this BulkCreateResponse. :type: str \"\"\"", "str \"\"\" if object is None: raise ValueError(\"Invalid value for `object`, must not", "return self._id @id.setter def id(self, id): \"\"\" Sets the id of this BulkCreateResponse.", "claim the ownership of a device which is not yet assigned to an", "BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated by the swagger code generator", "self._processed_count = processed_count self._status = status self._total_count = total_count self.discriminator = None @property", "The errors_count of this BulkCreateResponse. 
:rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self,", "Sets the errors_count of this BulkCreateResponse. The number of enrollment identities with failed", ":type: int \"\"\" if total_count is None: raise ValueError(\"Invalid value for `total_count`, must", "'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None,", "\"\"\" Enrollment API Mbed Cloud Connect Enrollment Service allows users to claim the", "\"\"\" Gets the etag of this BulkCreateResponse. etag :return: The etag of this", "of this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\"", "account. A device without an assigned account can be a device purchased from", "\"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\" Gets the created_at of this", "object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined in Swagger \"\"\"", "3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six import iteritems", "raise ValueError(\"Invalid value for `errors_count`, must not be `None`\") self._errors_count = errors_count @property", "errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a", "When the request has been fully processed, the state changes to 'completed'. 
:param", "from pprint import pformat from six import iteritems import re class BulkCreateResponse(object): \"\"\"", "`None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError( \"Invalid value", "str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of", "must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise", "Total number of enrollment identities found in the input CSV. :return: The total_count", "BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\" Sets the", "to_dict(self): \"\"\" Returns the model properties as a dict \"\"\" result = {}", "BulkCreateResponse. :type: str \"\"\" if id is None: raise ValueError(\"Invalid value for `id`,", "= total_count self.discriminator = None @property def account_id(self): \"\"\" Gets the account_id of", "attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\")", "} attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file':", "({0}), must be one of {1}\" .format(object, allowed_values) ) self._object = object @property", "= created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file =", "int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count of", "one of {1}\" .format(status, allowed_values) ) self._status = status @property def total_count(self): \"\"\"", "The time of completing the bulk creation task. 
:return: The completed_at of this", "import re class BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated by the", "int \"\"\" if processed_count is None: raise ValueError(\"Invalid value for `processed_count`, must not", "'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None,", "of enrollment identities processed until now. :param processed_count: The processed_count of this BulkCreateResponse.", "Do not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key", "still in progress, the state is shown as 'processing'. When the request has", "self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns true if both objects are", "The errors_report_file of this BulkCreateResponse. :type: str \"\"\" self._errors_report_file = errors_report_file @property def", "https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six import iteritems import re class", "the value is attribute type. attribute_map (dict): The key is attribute name and", "completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse", "not be `None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns the model properties", "The id of this BulkCreateResponse. :type: str \"\"\" if id is None: raise", "of this BulkCreateResponse. The time of receiving the bulk creation task. :param created_at:", "identities found in the input CSV. :param total_count: The total_count of this BulkCreateResponse.", "failed processing. :return: The errors_count of this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count", "str \"\"\" if id is None: raise ValueError(\"Invalid value for `id`, must not", "of enrollment identities processed until now. 
:return: The processed_count of this BulkCreateResponse. :rtype:", "BulkCreateResponse. The number of enrollment identities with failed processing. :param errors_count: The errors_count", ":type: str \"\"\" if etag is None: raise ValueError(\"Invalid value for `etag`, must", "id of this BulkCreateResponse. Bulk ID :return: The id of this BulkCreateResponse. :rtype:", "if id is not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for", "this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets", "information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git", "of this BulkCreateResponse. :type: int \"\"\" if errors_count is None: raise ValueError(\"Invalid value", "= errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file = full_report_file self._id =", "Sets the etag of this BulkCreateResponse. etag :param etag: The etag of this", "'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object':", "BulkCreateResponse. The number of enrollment identities processed until now. :param processed_count: The processed_count", "of this BulkCreateResponse. The number of enrollment identities processed until now. :param processed_count:", "hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif", "the request has been fully processed, the state changes to 'completed'. :return: The", "fully processed, the state changes to 'completed'. :return: The status of this BulkCreateResponse.", "\"\"\" Gets the account_id of this BulkCreateResponse. ID :return: The account_id of this", "of this BulkCreateResponse. 
:type: str \"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\"", "value for `processed_count`, must not be `None`\") self._processed_count = processed_count @property def status(self):", "of this BulkCreateResponse. The time of completing the bulk creation task. :return: The", "of this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\"", "def total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse. Total number of enrollment", "in Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at = created_at self._errors_count", "elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\")", "id of this BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id):", "if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] = value return result", "errors_report_file self._etag = etag self._full_report_file = full_report_file self._id = id self._object = object", "BulkCreateResponse. :type: str \"\"\" if etag is None: raise ValueError(\"Invalid value for `etag`,", "None: raise ValueError(\"Invalid value for `created_at`, must not be `None`\") self._created_at = created_at", "object of this BulkCreateResponse. :rtype: str \"\"\" return self._object @object.setter def object(self, object):", "BulkCreateResponse. :type: str \"\"\" if status is None: raise ValueError(\"Invalid value for `status`,", "object(self, object): \"\"\" Sets the object of this BulkCreateResponse. :param object: The object", "processed_count self._status = status self._total_count = total_count self.discriminator = None @property def account_id(self):", "full_report_file of this BulkCreateResponse. 
:type: str \"\"\" self._full_report_file = full_report_file @property def id(self):", "been fully processed, the state changes to 'completed'. :param status: The status of", "created_at): \"\"\" Sets the created_at of this BulkCreateResponse. The time of receiving the", "The total_count of this BulkCreateResponse. :type: int \"\"\" if total_count is None: raise", "def total_count(self, total_count): \"\"\" Sets the total_count of this BulkCreateResponse. Total number of", "to 'completed'. :param status: The status of this BulkCreateResponse. :type: str \"\"\" if", "None: raise ValueError(\"Invalid value for `account_id`, must not be `None`\") self._account_id = account_id", "status of this BulkCreateResponse. :type: str \"\"\" if status is None: raise ValueError(\"Invalid", "self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file = full_report_file self._id", "object self._processed_count = processed_count self._status = status self._total_count = total_count self.discriminator = None", "def object(self, object): \"\"\" Sets the object of this BulkCreateResponse. :param object: The", ":param object: The object of this BulkCreateResponse. :type: str \"\"\" if object is", "datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at of", "\"\"\" if not isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self,", "version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six import", "as 'processing'. When the request has been fully processed, the state changes to", "errors_count of this BulkCreateResponse. The number of enrollment identities with failed processing. 
:param", "result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items()", "be `None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file of", "of enrollment identities found in the input CSV. :param total_count: The total_count of", "time of creation. If the creation is still in progress, the state is", "self._object = object @property def processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse.", "@property def status(self): \"\"\" Gets the status of this BulkCreateResponse. The state of", "etag @property def full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse. :return: The", "number of enrollment identities processed until now. :param processed_count: The processed_count of this", "object): \"\"\" Sets the object of this BulkCreateResponse. :param object: The object of", "\"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint` \"\"\" return self.to_str()", "The number of enrollment identities with failed processing. :return: The errors_count of this", "else: result[attr] = value return result def to_str(self): \"\"\" Returns the string representation", "return self.to_str() def __eq__(self, other): \"\"\" Returns true if both objects are equal", "task. :return: The completed_at of this BulkCreateResponse. 
:rtype: datetime \"\"\" return self._completed_at @completed_at.setter", "the string representation of the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For", "return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count of this BulkCreateResponse.", "are equal \"\"\" if not isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__", "if not isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other):", "is shown as 'processing'. When the request has been fully processed, the state", "dealer) or a device transferred from an account to another. More information in", "not be `None`\") if id is not None and not re.search('^[A-Za-z0-9]{32}', id): raise", "account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse. ID :return: The account_id of", "objects are equal \"\"\" if not isinstance(other, BulkCreateResponse): return False return self.__dict__ ==", "etag): raise ValueError(\"Invalid value for `etag`, must be a follow pattern or equal", "def created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse. The time of receiving", "@property def completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse. The time of", "{} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list):", "The time of completing the bulk creation task. :param completed_at: The completed_at of", "BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the", "Mbed Cloud Connect Enrollment Service allows users to claim the ownership of a", "not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must be a follow pattern", "account_id): \"\"\" Sets the account_id of this BulkCreateResponse. 
ID :param account_id: The account_id", "of the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint`", "of this BulkCreateResponse. The number of enrollment identities processed until now. :return: The", "completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse. The time of completing the", "this BulkCreateResponse. The time of receiving the bulk creation task. :param created_at: The", "def object(self): \"\"\" Gets the object of this BulkCreateResponse. :return: The object of", "@errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse. The number", "model properties as a dict \"\"\" result = {} for attr, _ in", "an assigned account can be a device purchased from the open market (OEM", "# coding: utf-8 \"\"\" Enrollment API Mbed Cloud Connect Enrollment Service allows users", "which is not yet assigned to an account. A device without an assigned", "of this BulkCreateResponse. ID :param account_id: The account_id of this BulkCreateResponse. :type: str", "is 'new' at the time of creation. If the creation is still in", "of this BulkCreateResponse. The time of completing the bulk creation task. :param completed_at:", "created_at is None: raise ValueError(\"Invalid value for `created_at`, must not be `None`\") self._created_at", "not yet assigned to an account. 
A device without an assigned account can", "Attributes: swagger_types (dict): The key is attribute name and the value is attribute", "value for `created_at`, must not be `None`\") self._created_at = created_at @property def errors_count(self):", "'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int'", "= total_count def to_dict(self): \"\"\" Returns the model properties as a dict \"\"\"", "pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint` \"\"\" return self.to_str() def __eq__(self,", "created_at @property def errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse. The number", "not be `None`\") self._processed_count = processed_count @property def status(self): \"\"\" Gets the status", "\"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file':", "\"\"\" Gets the status of this BulkCreateResponse. The state of the process is", "account_id of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id):", "'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count':", "\"\"\" Attributes: swagger_types (dict): The key is attribute name and the value is", "def id(self): \"\"\" Gets the id of this BulkCreateResponse. Bulk ID :return: The", "processed_count of this BulkCreateResponse. :type: int \"\"\" if processed_count is None: raise ValueError(\"Invalid", "receiving the bulk creation task. :return: The created_at of this BulkCreateResponse. :rtype: datetime", "enrollment identities found in the input CSV. :return: The total_count of this BulkCreateResponse.", "BulkCreateResponse. 
:rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the", "id self._object = object self._processed_count = processed_count self._status = status self._total_count = total_count", "completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse. The time of completing the", "self._etag @etag.setter def etag(self, etag): \"\"\" Sets the etag of this BulkCreateResponse. etag", ":type: datetime \"\"\" if created_at is None: raise ValueError(\"Invalid value for `created_at`, must", "if both objects are equal \"\"\" if not isinstance(other, BulkCreateResponse): return False return", "'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file':", "the bulk creation task. :param created_at: The created_at of this BulkCreateResponse. :type: datetime", "'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id',", "account_id: The account_id of this BulkCreateResponse. :type: str \"\"\" if account_id is None:", "\"\"\" Sets the id of this BulkCreateResponse. Bulk ID :param id: The id", "The etag of this BulkCreateResponse. :type: str \"\"\" if etag is None: raise", "The status of this BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self,", ":rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\" Sets the status", "not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must be", "not be `None`\") if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise", "= object @property def processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse. The", "number of enrollment identities processed until now. 
:return: The processed_count of this BulkCreateResponse.", "status is None: raise ValueError(\"Invalid value for `status`, must not be `None`\") allowed_values", "The completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property def", "if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else", "raise ValueError(\"Invalid value for `created_at`, must not be `None`\") self._created_at = created_at @property", "id(self, id): \"\"\" Sets the id of this BulkCreateResponse. Bulk ID :param id:", "Gets the id of this BulkCreateResponse. Bulk ID :return: The id of this", "must not be `None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets the", "ValueError(\"Invalid value for `id`, must not be `None`\") if id is not None", "The key is attribute name and the value is json key in definition.", "the id of this BulkCreateResponse. Bulk ID :param id: The id of this", ":rtype: str \"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\" Sets the etag", "self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse. The", "a device which is not yet assigned to an account. A device without", "ValueError(\"Invalid value for `etag`, must not be `None`\") if etag is not None", "str \"\"\" return self._id @id.setter def id(self, id): \"\"\" Sets the id of", "is attribute name and the value is json key in definition. \"\"\" swagger_types", ":return: The object of this BulkCreateResponse. 
:rtype: str \"\"\" return self._object @object.setter def", "to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\" Gets the full_report_file of", "representation of the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and", "other): \"\"\" Returns true if both objects are equal \"\"\" if not isinstance(other,", "of this BulkCreateResponse. etag :return: The etag of this BulkCreateResponse. :rtype: str \"\"\"", "id is not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`,", "= processed_count self._status = status self._total_count = total_count self.discriminator = None @property def", "created_at: The created_at of this BulkCreateResponse. :type: datetime \"\"\" if created_at is None:", "if status is None: raise ValueError(\"Invalid value for `status`, must not be `None`\")", "\"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this", "x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr]", "@etag.setter def etag(self, etag): \"\"\" Sets the etag of this BulkCreateResponse. etag :param", "of this BulkCreateResponse. The number of enrollment identities with failed processing. :param errors_count:", "enrollment identities with failed processing. :return: The errors_count of this BulkCreateResponse. :rtype: int", "\"\"\" Gets the full_report_file of this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse.", "def completed_at(self, completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse. The time of", ":type: str \"\"\" if id is None: raise ValueError(\"Invalid value for `id`, must", "BulkCreateResponse. The time of receiving the bulk creation task. 
:param created_at: The created_at", "\"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count of this", "self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse. The", "'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None,", "bulk creation task. :param completed_at: The completed_at of this BulkCreateResponse. :type: datetime \"\"\"", "to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3", "'total_count': 'int' } attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count':", "the state changes to 'completed'. :param status: The status of this BulkCreateResponse. :type:", "the bulk creation task. :return: The completed_at of this BulkCreateResponse. :rtype: datetime \"\"\"", "def id(self, id): \"\"\" Sets the id of this BulkCreateResponse. Bulk ID :param", "'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None,", "equal \"\"\" if not isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__ def", "without an assigned account can be a device purchased from the open market", "= getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if", "not in allowed_values: raise ValueError( \"Invalid value for `status` ({0}), must be one", "full_report_file of this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. 
:rtype: str \"\"\"", "by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six import iteritems import re", "'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object',", "if object is None: raise ValueError(\"Invalid value for `object`, must not be `None`\")", "@property def object(self): \"\"\" Gets the object of this BulkCreateResponse. :return: The object", "the created_at of this BulkCreateResponse. The time of receiving the bulk creation task.", "'completed'. :param status: The status of this BulkCreateResponse. :type: str \"\"\" if status", ":rtype: str \"\"\" return self._object @object.setter def object(self, object): \"\"\" Sets the object", "@property def full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse. :return: The full_report_file", "the open market (OEM dealer) or a device transferred from an account to", "import pformat from six import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This", "return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id of this BulkCreateResponse.", "ValueError(\"Invalid value for `errors_count`, must not be `None`\") self._errors_count = errors_count @property def", "Sets the completed_at of this BulkCreateResponse. The time of completing the bulk creation", "yet assigned to an account. A device without an assigned account can be", "value for `id`, must not be `None`\") if id is not None and", "def etag(self): \"\"\" Gets the etag of this BulkCreateResponse. etag :return: The etag", "the input CSV. :param total_count: The total_count of this BulkCreateResponse. :type: int \"\"\"", "completed_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at):", "this BulkCreateResponse. 
The time of completing the bulk creation task. :return: The completed_at", "bulk creation task. :return: The created_at of this BulkCreateResponse. :rtype: datetime \"\"\" return", "to 'completed'. :return: The status of this BulkCreateResponse. :rtype: str \"\"\" return self._status", "this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets", "id of this BulkCreateResponse. Bulk ID :param id: The id of this BulkCreateResponse.", "this BulkCreateResponse. :param object: The object of this BulkCreateResponse. :type: str \"\"\" if", "<reponame>GQMai/mbed-cloud-sdk-python # coding: utf-8 \"\"\" Enrollment API Mbed Cloud Connect Enrollment Service allows", "status self._total_count = total_count self.discriminator = None @property def account_id(self): \"\"\" Gets the", "Returns the model properties as a dict \"\"\" result = {} for attr,", "not isinstance(other, BulkCreateResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\"", "full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse. :param full_report_file: The full_report_file of", "'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id':", "else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict):", "ID :param account_id: The account_id of this BulkCreateResponse. :type: str \"\"\" if account_id", "For `print` and `pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns true", "BulkCreateResponse. :type: str \"\"\" if object is None: raise ValueError(\"Invalid value for `object`,", "ID :return: The id of this BulkCreateResponse. 
:rtype: str \"\"\" return self._id @id.setter", "\"\"\" NOTE: This class is auto generated by the swagger code generator program.", "status(self, status): \"\"\" Sets the status of this BulkCreateResponse. The state of the", "enrollment identities processed until now. :return: The processed_count of this BulkCreateResponse. :rtype: int", "spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six", "\"\"\" return self._object @object.setter def object(self, object): \"\"\" Sets the object of this", "\"\"\" Sets the etag of this BulkCreateResponse. etag :param etag: The etag of", "\"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns true if both objects are", ":param completed_at: The completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at", "object of this BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype: str \"\"\"", "self._id @id.setter def id(self, id): \"\"\" Sets the id of this BulkCreateResponse. Bulk", "\"\"\" if object is None: raise ValueError(\"Invalid value for `object`, must not be", "this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets", "the errors_count of this BulkCreateResponse. The number of enrollment identities with failed processing.", "creation is still in progress, the state is shown as 'processing'. When the", "'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str',", ":param status: The status of this BulkCreateResponse. :type: str \"\"\" if status is", "completing the bulk creation task. :param completed_at: The completed_at of this BulkCreateResponse. 
:type:", "'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status':", "str \"\"\" if etag is None: raise ValueError(\"Invalid value for `etag`, must not", "return False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns true if", "generated by the swagger code generator program. Do not edit the class manually.", "`pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns true if both objects", "this BulkCreateResponse. The number of enrollment identities processed until now. :return: The processed_count", "processing. :param errors_count: The errors_count of this BulkCreateResponse. :type: int \"\"\" if errors_count", "value is json key in definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at':", "device purchased from the open market (OEM dealer) or a device transferred from", "errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse. :return: The errors_report_file of this", "if account_id is None: raise ValueError(\"Invalid value for `account_id`, must not be `None`\")", "errors_report_file @property def etag(self): \"\"\" Gets the etag of this BulkCreateResponse. etag :return:", "processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse. The number of enrollment identities", "if processed_count is None: raise ValueError(\"Invalid value for `processed_count`, must not be `None`\")", "total_count: The total_count of this BulkCreateResponse. :type: int \"\"\" if total_count is None:", "_ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map(", ":return: The status of this BulkCreateResponse. 
:rtype: str \"\"\" return self._status @status.setter def", ":type: int \"\"\" if errors_count is None: raise ValueError(\"Invalid value for `errors_count`, must", "this BulkCreateResponse. The number of enrollment identities with failed processing. :param errors_count: The", "= {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value,", "a model defined in Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at", "purchased from the open market (OEM dealer) or a device transferred from an", ":type: str \"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets the etag", "is still in progress, the state is shown as 'processing'. When the request", "must not be `None`\") self._account_id = account_id @property def completed_at(self): \"\"\" Gets the", "\"\"\" Sets the completed_at of this BulkCreateResponse. The time of completing the bulk", "json key in definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at':", "must not be `None`\") self._created_at = created_at @property def errors_count(self): \"\"\" Gets the", "The time of receiving the bulk creation task. :param created_at: The created_at of", "self.discriminator = None @property def account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse.", "account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\"", "enrollment identities found in the input CSV. :param total_count: The total_count of this", "Sets the id of this BulkCreateResponse. Bulk ID :param id: The id of", "\"\"\" Sets the created_at of this BulkCreateResponse. 
The time of receiving the bulk", "'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map", "`print` and `pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns true if", "self._full_report_file = full_report_file self._id = id self._object = object self._processed_count = processed_count self._status", ":return: The etag of this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter def", "The completed_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self,", "The number of enrollment identities processed until now. :param processed_count: The processed_count of", "raise ValueError(\"Invalid value for `object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if", "account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version:", "until now. :param processed_count: The processed_count of this BulkCreateResponse. :type: int \"\"\" if", "'status': 'str', 'total_count': 'int' } attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at':", "enrollment identities with failed processing. :param errors_count: The errors_count of this BulkCreateResponse. :type:", "Sets the total_count of this BulkCreateResponse. Total number of enrollment identities found in", "task. :param created_at: The created_at of this BulkCreateResponse. 
:type: datetime \"\"\" if created_at", "\"\"\" return self._id @id.setter def id(self, id): \"\"\" Sets the id of this", "value for `object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not", ":type: str \"\"\" if account_id is None: raise ValueError(\"Invalid value for `account_id`, must", "value for `total_count`, must not be `None`\") self._total_count = total_count def to_dict(self): \"\"\"", "be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def", "program. Do not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The", "__eq__(self, other): \"\"\" Returns true if both objects are equal \"\"\" if not", "@property def etag(self): \"\"\" Gets the etag of this BulkCreateResponse. etag :return: The", "= [\"new\", \"processing\", \"completed\"] if status not in allowed_values: raise ValueError( \"Invalid value", "of this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\"", "= full_report_file @property def id(self): \"\"\" Gets the id of this BulkCreateResponse. Bulk", "`id`, must not be `None`\") if id is not None and not re.search('^[A-Za-z0-9]{32}',", "The number of enrollment identities processed until now. :return: The processed_count of this", "(item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] = value", "be one of {1}\" .format(status, allowed_values) ) self._status = status @property def total_count(self):", "'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count',", "total_count(self, total_count): \"\"\" Sets the total_count of this BulkCreateResponse. 
Total number of enrollment", "isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,", "identities processed until now. :return: The processed_count of this BulkCreateResponse. :rtype: int \"\"\"", "is attribute type. attribute_map (dict): The key is attribute name and the value", "Sets the account_id of this BulkCreateResponse. ID :param account_id: The account_id of this", "`None`\") self._created_at = created_at @property def errors_count(self): \"\"\" Gets the errors_count of this", "errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str", "def status(self, status): \"\"\" Sets the status of this BulkCreateResponse. The state of", "BulkCreateResponse. Bulk ID :return: The id of this BulkCreateResponse. :rtype: str \"\"\" return", "'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status':", "swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str',", "allowed_values: raise ValueError( \"Invalid value for `object` ({0}), must be one of {1}\"", "\"\"\" if etag is None: raise ValueError(\"Invalid value for `etag`, must not be", "return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at of this BulkCreateResponse.", "= status self._total_count = total_count self.discriminator = None @property def account_id(self): \"\"\" Gets", "def processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse. 
The number of enrollment", "'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count':", "`None`\") if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value", "datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at of", "of this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return", "of this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\"", "def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse. :param full_report_file: The", "BulkCreateResponse. The time of completing the bulk creation task. :param completed_at: The completed_at", "completed_at of this BulkCreateResponse. The time of completing the bulk creation task. :param", "{1}\" .format(object, allowed_values) ) self._object = object @property def processed_count(self): \"\"\" Gets the", "account_id @property def completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse. The time", "this BulkCreateResponse. The time of receiving the bulk creation task. :return: The created_at", "The processed_count of this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self,", "of this BulkCreateResponse. Bulk ID :return: The id of this BulkCreateResponse. :rtype: str", "The etag of this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter def etag(self,", "= { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag':", "this BulkCreateResponse. 
:rtype: str \"\"\" return self._object @object.setter def object(self, object): \"\"\" Sets", "etag of this BulkCreateResponse. etag :return: The etag of this BulkCreateResponse. :rtype: str", "identities processed until now. :param processed_count: The processed_count of this BulkCreateResponse. :type: int", "result[attr] = value return result def to_str(self): \"\"\" Returns the string representation of", "of this BulkCreateResponse. :type: int \"\"\" if total_count is None: raise ValueError(\"Invalid value", "six import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This class is auto", "result def to_str(self): \"\"\" Returns the string representation of the model \"\"\" return", "The key is attribute name and the value is attribute type. attribute_map (dict):", "Sets the status of this BulkCreateResponse. The state of the process is 'new'", "BulkCreateResponse - a model defined in Swagger \"\"\" self._account_id = account_id self._completed_at =", "object(self): \"\"\" Gets the object of this BulkCreateResponse. :return: The object of this", "int \"\"\" if errors_count is None: raise ValueError(\"Invalid value for `errors_count`, must not", "this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets", "this BulkCreateResponse. :type: str \"\"\" if id is None: raise ValueError(\"Invalid value for", "Sets the created_at of this BulkCreateResponse. The time of receiving the bulk creation", "raise ValueError(\"Invalid value for `total_count`, must not be `None`\") self._total_count = total_count def", "identities found in the input CSV. :return: The total_count of this BulkCreateResponse. :rtype:", "BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. 
:rtype: str \"\"\" return self._full_report_file @full_report_file.setter", "\"\"\" if errors_count is None: raise ValueError(\"Invalid value for `errors_count`, must not be", "this BulkCreateResponse. :type: str \"\"\" if account_id is None: raise ValueError(\"Invalid value for", "in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda", "be a device purchased from the open market (OEM dealer) or a device", "= etag self._full_report_file = full_report_file self._id = id self._object = object self._processed_count =", "@property def errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse. :return: The errors_report_file", "'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None,", "value for `status`, must not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if", "\"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this", "in allowed_values: raise ValueError( \"Invalid value for `object` ({0}), must be one of", "\"\"\" Gets the errors_report_file of this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse.", "self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse.", "completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag = etag", "identities with failed processing. :return: The errors_count of this BulkCreateResponse. :rtype: int \"\"\"", ":type: int \"\"\" if processed_count is None: raise ValueError(\"Invalid value for `processed_count`, must", "of this BulkCreateResponse. 
:type: str \"\"\" if object is None: raise ValueError(\"Invalid value", "value is attribute type. attribute_map (dict): The key is attribute name and the", "'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None,", "@property def id(self): \"\"\" Gets the id of this BulkCreateResponse. Bulk ID :return:", "BulkCreateResponse. The number of enrollment identities with failed processing. :return: The errors_count of", "= etag @property def full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse. :return:", ":return: The processed_count of this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter def", ") self._status = status @property def total_count(self): \"\"\" Gets the total_count of this", "API Mbed Cloud Connect Enrollment Service allows users to claim the ownership of", "processed, the state changes to 'completed'. :param status: The status of this BulkCreateResponse.", "this BulkCreateResponse. :type: str \"\"\" if etag is None: raise ValueError(\"Invalid value for", "\"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count of this", "this BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse. :rtype: str \"\"\" return", "`object` ({0}), must be one of {1}\" .format(object, allowed_values) ) self._object = object", "errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse. The number of enrollment identities", "result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if", "creation task. :param created_at: The created_at of this BulkCreateResponse. :type: datetime \"\"\" if", "document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat", "the completed_at of this BulkCreateResponse. 
The time of completing the bulk creation task.", "etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined", "the account_id of this BulkCreateResponse. ID :param account_id: The account_id of this BulkCreateResponse.", ":rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count", "Gets the etag of this BulkCreateResponse. etag :return: The etag of this BulkCreateResponse.", "of completing the bulk creation task. :return: The completed_at of this BulkCreateResponse. :rtype:", "status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined in Swagger \"\"\" self._account_id =", "str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of", "key is attribute name and the value is json key in definition. \"\"\"", "str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id of", "object: The object of this BulkCreateResponse. :type: str \"\"\" if object is None:", "account_id of this BulkCreateResponse. ID :param account_id: The account_id of this BulkCreateResponse. :type:", "state of the process is 'new' at the time of creation. If the", "etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`,", "\"\"\" Sets the total_count of this BulkCreateResponse. Total number of enrollment identities found", "else item, value.items() )) else: result[attr] = value return result def to_str(self): \"\"\"", "of this BulkCreateResponse. :type: str \"\"\" if id is None: raise ValueError(\"Invalid value", "of this BulkCreateResponse. :param object: The object of this BulkCreateResponse. 
:type: str \"\"\"", "errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file of", "errors_count is None: raise ValueError(\"Invalid value for `errors_count`, must not be `None`\") self._errors_count", "(OEM dealer) or a device transferred from an account to another. More information", "open market (OEM dealer) or a device transferred from an account to another.", "the state changes to 'completed'. :return: The status of this BulkCreateResponse. :rtype: str", "import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated", "ValueError(\"Invalid value for `created_at`, must not be `None`\") self._created_at = created_at @property def", "The object of this BulkCreateResponse. :type: str \"\"\" if object is None: raise", "status @property def total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse. Total number", "self._status = status @property def total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse.", "str \"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\" Gets the id of", "`/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\" Gets the object of this", "this BulkCreateResponse. :return: The object of this BulkCreateResponse. :rtype: str \"\"\" return self._object", "self._id = id self._object = object self._processed_count = processed_count self._status = status self._total_count", "this BulkCreateResponse. :param full_report_file: The full_report_file of this BulkCreateResponse. :type: str \"\"\" self._full_report_file", "in the input CSV. :return: The total_count of this BulkCreateResponse. :rtype: int \"\"\"", "users to claim the ownership of a device which is not yet assigned", "this BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\" Sets", "errors_report_file of this BulkCreateResponse. 
:type: str \"\"\" self._errors_report_file = errors_report_file @property def etag(self):", "= errors_report_file self._etag = etag self._full_report_file = full_report_file self._id = id self._object =", "The account_id of this BulkCreateResponse. :type: str \"\"\" if account_id is None: raise", "of a device which is not yet assigned to an account. A device", "total_count): \"\"\" Sets the total_count of this BulkCreateResponse. Total number of enrollment identities", "must not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status not in", "def created_at(self, created_at): \"\"\" Sets the created_at of this BulkCreateResponse. The time of", "None: raise ValueError(\"Invalid value for `total_count`, must not be `None`\") self._total_count = total_count", "processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined in Swagger \"\"\" self._account_id", "\"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0],", "of this BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str \"\"\"", "full_report_file @property def id(self): \"\"\" Gets the id of this BulkCreateResponse. Bulk ID", "etag(self, etag): \"\"\" Sets the etag of this BulkCreateResponse. etag :param etag: The", "`None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status not in allowed_values: raise ValueError(", "a device purchased from the open market (OEM dealer) or a device transferred", "= value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if", "creation task. :param completed_at: The completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at", "The state of the process is 'new' at the time of creation. 
If", "str \"\"\" return self._status @status.setter def status(self, status): \"\"\" Sets the status of", "request has been fully processed, the state changes to 'completed'. :param status: The", "the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint` \"\"\"", "\"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id of this", "\"\"\" Sets the errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file of this", "the input CSV. :return: The total_count of this BulkCreateResponse. :rtype: int \"\"\" return", "be one of {1}\" .format(object, allowed_values) ) self._object = object @property def processed_count(self):", "\"\"\" return self._status @status.setter def status(self, status): \"\"\" Sets the status of this", "total_count def to_dict(self): \"\"\" Returns the model properties as a dict \"\"\" result", "str \"\"\" return self._object @object.setter def object(self, object): \"\"\" Sets the object of", ":rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id", "for `created_at`, must not be `None`\") self._created_at = created_at @property def errors_count(self): \"\"\"", "of this BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status): \"\"\"", "processing. :return: The errors_count of this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count @errors_count.setter", "object is None: raise ValueError(\"Invalid value for `object`, must not be `None`\") allowed_values", "@property def created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse. The time of", "When the request has been fully processed, the state changes to 'completed'. 
:return:", "= dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() ))", "id @property def object(self): \"\"\" Gets the object of this BulkCreateResponse. :return: The", "this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets", ":return: The created_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at @created_at.setter def", "be `None`\") if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid", "value.items() )) else: result[attr] = value return result def to_str(self): \"\"\" Returns the", "has been fully processed, the state changes to 'completed'. :return: The status of", "number of enrollment identities found in the input CSV. :param total_count: The total_count", "etag self._full_report_file = full_report_file self._id = id self._object = object self._processed_count = processed_count", "status of this BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter def status(self, status):", "definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int',", "not be `None`\") self._created_at = created_at @property def errors_count(self): \"\"\" Gets the errors_count", "string representation of the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print`", "return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns true if both objects", "ValueError(\"Invalid value for `total_count`, must not be `None`\") self._total_count = total_count def to_dict(self):", "\"\"\" Gets the errors_count of this BulkCreateResponse. 
The number of enrollment identities with", "errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model", "'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count':", "an account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec", "this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets", "self._etag = etag @property def full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse.", "@property def account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse. ID :return: The", "etag is None: raise ValueError(\"Invalid value for `etag`, must not be `None`\") if", "for `etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag =", "x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr]", "and the value is attribute type. attribute_map (dict): The key is attribute name", "BulkCreateResponse. :type: int \"\"\" if processed_count is None: raise ValueError(\"Invalid value for `processed_count`,", "= errors_report_file @property def etag(self): \"\"\" Gets the etag of this BulkCreateResponse. etag", "if etag is None: raise ValueError(\"Invalid value for `etag`, must not be `None`\")", "ValueError(\"Invalid value for `processed_count`, must not be `None`\") self._processed_count = processed_count @property def", "ValueError( \"Invalid value for `status` ({0}), must be one of {1}\" .format(status, allowed_values)", "created_at of this BulkCreateResponse. 
:rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at):", "str \"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\" Sets the etag of", "if status not in allowed_values: raise ValueError( \"Invalid value for `status` ({0}), must", "task. :param completed_at: The completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at =", "@id.setter def id(self, id): \"\"\" Sets the id of this BulkCreateResponse. Bulk ID", "in allowed_values: raise ValueError( \"Invalid value for `status` ({0}), must be one of", ":return: The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def", "item, value.items() )) else: result[attr] = value return result def to_str(self): \"\"\" Returns", "self._id = id @property def object(self): \"\"\" Gets the object of this BulkCreateResponse.", "\"\"\" For `print` and `pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns", "\"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count of this", "\"Invalid value for `object` ({0}), must be one of {1}\" .format(object, allowed_values) )", "item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] =", "id is None: raise ValueError(\"Invalid value for `id`, must not be `None`\") if", "item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] = value return", "\"\"\" Gets the object of this BulkCreateResponse. :return: The object of this BulkCreateResponse.", "BulkCreateResponse. The time of completing the bulk creation task. 
:return: The completed_at of", "\"\"\" if created_at is None: raise ValueError(\"Invalid value for `created_at`, must not be", "total_count self.discriminator = None @property def account_id(self): \"\"\" Gets the account_id of this", "BulkCreateResponse. :type: str \"\"\" if account_id is None: raise ValueError(\"Invalid value for `account_id`,", "'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id': 'id', 'object':", "coding: utf-8 \"\"\" Enrollment API Mbed Cloud Connect Enrollment Service allows users to", "this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets", "\"\"\" BulkCreateResponse - a model defined in Swagger \"\"\" self._account_id = account_id self._completed_at", "is not yet assigned to an account. A device without an assigned account", "etag :param etag: The etag of this BulkCreateResponse. :type: str \"\"\" if etag", "result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict())", "been fully processed, the state changes to 'completed'. :return: The status of this", "The total_count of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self,", "is None: raise ValueError(\"Invalid value for `etag`, must not be `None`\") if etag", "BulkCreateResponse. etag :return: The etag of this BulkCreateResponse. :rtype: str \"\"\" return self._etag", "`created_at`, must not be `None`\") self._created_at = created_at @property def errors_count(self): \"\"\" Gets", "processed_count of this BulkCreateResponse. 
:rtype: int \"\"\" return self._processed_count @processed_count.setter def processed_count(self, processed_count):", ":type: str \"\"\" if status is None: raise ValueError(\"Invalid value for `status`, must", "self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse. The", "attribute_map (dict): The key is attribute name and the value is json key", "type. attribute_map (dict): The key is attribute name and the value is json", "Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint import pformat from six import iteritems import", "if total_count is None: raise ValueError(\"Invalid value for `total_count`, must not be `None`\")", "\"\"\" Sets the object of this BulkCreateResponse. :param object: The object of this", "is None: raise ValueError(\"Invalid value for `created_at`, must not be `None`\") self._created_at =", "be `None`\") if id is not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid", "= account_id @property def completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse. The", "id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined in Swagger", "BulkCreateResponse. The number of enrollment identities processed until now. :return: The processed_count of", "of this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. :rtype: str \"\"\" return", "full_report_file of this BulkCreateResponse. :param full_report_file: The full_report_file of this BulkCreateResponse. :type: str", "the request has been fully processed, the state changes to 'completed'. :param status:", "\"\"\" Returns true if both objects are equal \"\"\" if not isinstance(other, BulkCreateResponse):", "def errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse. 
The number of enrollment", ":rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file", ":rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count", "equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\" Gets the object", "full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse - a model defined in", "raise ValueError(\"Invalid value for `account_id`, must not be `None`\") self._account_id = account_id @property", "a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self):", ":return: The id of this BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def", "'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map =", "'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map = { 'account_id': 'account_id', 'completed_at':", "raise ValueError(\"Invalid value for `processed_count`, must not be `None`\") self._processed_count = processed_count @property", "of this BulkCreateResponse. :type: datetime \"\"\" if created_at is None: raise ValueError(\"Invalid value", "The errors_count of this BulkCreateResponse. :type: int \"\"\" if errors_count is None: raise", "The status of this BulkCreateResponse. :type: str \"\"\" if status is None: raise", "Connect Enrollment Service allows users to claim the ownership of a device which", "`None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file of this", "return self._status @status.setter def status(self, status): \"\"\" Sets the status of this BulkCreateResponse.", "processed until now. :param processed_count: The processed_count of this BulkCreateResponse. 
:type: int \"\"\"", "@created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at of this BulkCreateResponse. The time", "\"\"\" Gets the processed_count of this BulkCreateResponse. The number of enrollment identities processed", "raise ValueError(\"Invalid value for `etag`, must be a follow pattern or equal to", "The object of this BulkCreateResponse. :rtype: str \"\"\" return self._object @object.setter def object(self,", "time of receiving the bulk creation task. :return: The created_at of this BulkCreateResponse.", "This class is auto generated by the swagger code generator program. Do not", "is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must", "model defined in Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at =", "the process is 'new' at the time of creation. If the creation is", "\"\"\" Returns true if both objects are not equal \"\"\" return not self", "the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute name", "self._created_at = created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file", "of creation. If the creation is still in progress, the state is shown", "\"\"\" Sets the errors_count of this BulkCreateResponse. The number of enrollment identities with", "re class BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated by the swagger", "bulk creation task. :param created_at: The created_at of this BulkCreateResponse. :type: datetime \"\"\"", "re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must be a follow pattern or", "of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\"", "class manually. 
\"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute name and", "{1}\" .format(status, allowed_values) ) self._status = status @property def total_count(self): \"\"\" Gets the", "\"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at of this", "found in the input CSV. :return: The total_count of this BulkCreateResponse. :rtype: int", "of this BulkCreateResponse. The number of enrollment identities with failed processing. :return: The", "\"Invalid value for `status` ({0}), must be one of {1}\" .format(status, allowed_values) )", "'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self,", "return self._processed_count @processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse.", "ValueError(\"Invalid value for `account_id`, must not be `None`\") self._account_id = account_id @property def", "if object not in allowed_values: raise ValueError( \"Invalid value for `object` ({0}), must", "BulkCreateResponse. :param object: The object of this BulkCreateResponse. :type: str \"\"\" if object", ":type: str \"\"\" if object is None: raise ValueError(\"Invalid value for `object`, must", "def __ne__(self, other): \"\"\" Returns true if both objects are not equal \"\"\"", "attribute name and the value is json key in definition. 
\"\"\" swagger_types =", "__repr__(self): \"\"\" For `print` and `pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\"", "ValueError(\"Invalid value for `etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\")", "return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse.", "self._account_id = account_id @property def completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse.", "re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must be a follow pattern or", "self._account_id @account_id.setter def account_id(self, account_id): \"\"\" Sets the account_id of this BulkCreateResponse. ID", "Gets the created_at of this BulkCreateResponse. The time of receiving the bulk creation", "if errors_count is None: raise ValueError(\"Invalid value for `errors_count`, must not be `None`\")", "of this BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse. :rtype: str \"\"\"", "be `None`\") self._created_at = created_at @property def errors_count(self): \"\"\" Gets the errors_count of", "[Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from", "this BulkCreateResponse. The number of enrollment identities processed until now. 
:param processed_count: The", "a dict \"\"\" result = {} for attr, _ in iteritems(self.swagger_types): value =", "self._full_report_file = full_report_file @property def id(self): \"\"\" Gets the id of this BulkCreateResponse.", "is None: raise ValueError(\"Invalid value for `object`, must not be `None`\") allowed_values =", "if id is None: raise ValueError(\"Invalid value for `id`, must not be `None`\")", "'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file':", "now. :param processed_count: The processed_count of this BulkCreateResponse. :type: int \"\"\" if processed_count", "errors_count: The errors_count of this BulkCreateResponse. :type: int \"\"\" if errors_count is None:", "= account_id self._completed_at = completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file =", "NOTE: This class is auto generated by the swagger code generator program. Do", "of this BulkCreateResponse. :rtype: str \"\"\" return self._object @object.setter def object(self, object): \"\"\"", "return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse.", "def __eq__(self, other): \"\"\" Returns true if both objects are equal \"\"\" if", "= created_at @property def errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse. The", "iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x:", "`id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id", "found in the input CSV. :param total_count: The total_count of this BulkCreateResponse. :type:", "created_at(self, created_at): \"\"\" Sets the created_at of this BulkCreateResponse. The time of receiving", "id): \"\"\" Sets the id of this BulkCreateResponse. 
Bulk ID :param id: The", "self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse. :param", "attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file',", "is None: raise ValueError(\"Invalid value for `processed_count`, must not be `None`\") self._processed_count =", "Cloud Connect Enrollment Service allows users to claim the ownership of a device", "for `total_count`, must not be `None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns", "this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file", "the creation is still in progress, the state is shown as 'processing'. When", "self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at of this BulkCreateResponse. The", "status not in allowed_values: raise ValueError( \"Invalid value for `status` ({0}), must be", "be `None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns the model properties as", "int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count of", "id of this BulkCreateResponse. :type: str \"\"\" if id is None: raise ValueError(\"Invalid", "completed_at @property def created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse. The time", "of this BulkCreateResponse. 
:rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\"", "\"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at of this", "`account_id`, must not be `None`\") self._account_id = account_id @property def completed_at(self): \"\"\" Gets", "None: raise ValueError(\"Invalid value for `object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"]", "this BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\" Gets", "value for `id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id", "of {1}\" .format(status, allowed_values) ) self._status = status @property def total_count(self): \"\"\" Gets", "state changes to 'completed'. :return: The status of this BulkCreateResponse. :rtype: str \"\"\"", "self._created_at = created_at @property def errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse.", "the etag of this BulkCreateResponse. etag :param etag: The etag of this BulkCreateResponse.", "a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self):", "`processed_count`, must not be `None`\") self._processed_count = processed_count @property def status(self): \"\"\" Gets", "- a model defined in Swagger \"\"\" self._account_id = account_id self._completed_at = completed_at", "Total number of enrollment identities found in the input CSV. :param total_count: The", "utf-8 \"\"\" Enrollment API Mbed Cloud Connect Enrollment Service allows users to claim", "processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse. The number of enrollment identities", "total_count of this BulkCreateResponse. 
Total number of enrollment identities found in the input", "errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse. :return: The", "`total_count`, must not be `None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns the", ":param total_count: The total_count of this BulkCreateResponse. :type: int \"\"\" if total_count is", "must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property", "number of enrollment identities with failed processing. :return: The errors_count of this BulkCreateResponse.", "to_str(self): \"\"\" Returns the string representation of the model \"\"\" return pformat(self.to_dict()) def", "full_report_file: The full_report_file of this BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file @property", "or a device transferred from an account to another. More information in [Device", "this BulkCreateResponse. :type: str \"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets", "return self._etag @etag.setter def etag(self, etag): \"\"\" Sets the etag of this BulkCreateResponse.", "a device transferred from an account to another. More information in [Device ownership:", ":return: The total_count of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def", "BulkCreateResponse. :type: datetime \"\"\" if created_at is None: raise ValueError(\"Invalid value for `created_at`,", "BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\" Gets the", "etag(self): \"\"\" Gets the etag of this BulkCreateResponse. 
etag :return: The etag of", "value for `errors_count`, must not be `None`\") self._errors_count = errors_count @property def errors_report_file(self):", "errors_count self._errors_report_file = errors_report_file self._etag = etag self._full_report_file = full_report_file self._id = id", "allowed_values) ) self._object = object @property def processed_count(self): \"\"\" Gets the processed_count of", "is None: raise ValueError(\"Invalid value for `id`, must not be `None`\") if id", "is not None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must", "'full_report_file': 'full_report_file', 'id': 'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' }", "\"to_dict\") else item, value.items() )) else: result[attr] = value return result def to_str(self):", "other): \"\"\" Returns true if both objects are not equal \"\"\" return not", "More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by:", "account can be a device purchased from the open market (OEM dealer) or", "the bulk creation task. :return: The created_at of this BulkCreateResponse. :rtype: datetime \"\"\"", "def status(self): \"\"\" Gets the status of this BulkCreateResponse. The state of the", "None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must be a", "value for `object` ({0}), must be one of {1}\" .format(object, allowed_values) ) self._object", "errors_count(self): \"\"\" Gets the errors_count of this BulkCreateResponse. The number of enrollment identities", "'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file',", "BulkCreateResponse. ID :param account_id: The account_id of this BulkCreateResponse. 
:type: str \"\"\" if", "def processed_count(self, processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse. The number of", "with failed processing. :param errors_count: The errors_count of this BulkCreateResponse. :type: int \"\"\"", "\"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\" Sets the etag of this", "request has been fully processed, the state changes to 'completed'. :return: The status", ":param processed_count: The processed_count of this BulkCreateResponse. :type: int \"\"\" if processed_count is", "total_count of this BulkCreateResponse. :type: int \"\"\" if total_count is None: raise ValueError(\"Invalid", "for `status`, must not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status", ".format(status, allowed_values) ) self._status = status @property def total_count(self): \"\"\" Gets the total_count", "processed_count: The processed_count of this BulkCreateResponse. :type: int \"\"\" if processed_count is None:", "CSV. :param total_count: The total_count of this BulkCreateResponse. :type: int \"\"\" if total_count", "= full_report_file self._id = id self._object = object self._processed_count = processed_count self._status =", "def account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse. ID :return: The account_id", "The full_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self,", "input CSV. :param total_count: The total_count of this BulkCreateResponse. :type: int \"\"\" if", ":param created_at: The created_at of this BulkCreateResponse. :type: datetime \"\"\" if created_at is", "equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\" Gets the full_report_file", "self._object @object.setter def object(self, object): \"\"\" Sets the object of this BulkCreateResponse. :param", "has been fully processed, the state changes to 'completed'. 
:param status: The status", "of the process is 'new' at the time of creation. If the creation", "number of enrollment identities with failed processing. :param errors_count: The errors_count of this", "@total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count of this BulkCreateResponse. Total number", "follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\"", "def etag(self, etag): \"\"\" Sets the etag of this BulkCreateResponse. etag :param etag:", "account_id is None: raise ValueError(\"Invalid value for `account_id`, must not be `None`\") self._account_id", "raise ValueError(\"Invalid value for `etag`, must not be `None`\") if etag is not", ":rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at", "raise ValueError(\"Invalid value for `id`, must not be `None`\") if id is not", "enrollment identities processed until now. :param processed_count: The processed_count of this BulkCreateResponse. :type:", "for `id`, must not be `None`\") if id is not None and not", "status: The status of this BulkCreateResponse. :type: str \"\"\" if status is None:", "int \"\"\" if total_count is None: raise ValueError(\"Invalid value for `total_count`, must not", "def to_str(self): \"\"\" Returns the string representation of the model \"\"\" return pformat(self.to_dict())", "created_at of this BulkCreateResponse. The time of receiving the bulk creation task. :return:", "\"\"\" Sets the status of this BulkCreateResponse. The state of the process is", "pformat from six import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE: This class", "and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must be a follow", "etag :return: The etag of this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter", "BulkCreateResponse. 
:rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the", "list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value", "account_id(self, account_id): \"\"\" Sets the account_id of this BulkCreateResponse. ID :param account_id: The", "can be a device purchased from the open market (OEM dealer) or a", "the value is json key in definition. \"\"\" swagger_types = { 'account_id': 'str',", "return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse.", "The time of receiving the bulk creation task. :return: The created_at of this", "attribute name and the value is attribute type. attribute_map (dict): The key is", "self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets the etag of this BulkCreateResponse.", "true if both objects are equal \"\"\" if not isinstance(other, BulkCreateResponse): return False", "completed_at: The completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property", "BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the", "'total_count': 'total_count' } def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None,", "Sets the processed_count of this BulkCreateResponse. The number of enrollment identities processed until", "Gets the completed_at of this BulkCreateResponse. The time of completing the bulk creation", "another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated", "the full_report_file of this BulkCreateResponse. 
:param full_report_file: The full_report_file of this BulkCreateResponse. :type:", "of this BulkCreateResponse. The state of the process is 'new' at the time", "`errors_count`, must not be `None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets", "and the value is json key in definition. \"\"\" swagger_types = { 'account_id':", "__init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None):", "total_count=None): \"\"\" BulkCreateResponse - a model defined in Swagger \"\"\" self._account_id = account_id", "not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for `etag`, must be", "None: raise ValueError(\"Invalid value for `processed_count`, must not be `None`\") self._processed_count = processed_count", "now. :return: The processed_count of this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count @processed_count.setter", "must not be `None`\") self._total_count = total_count def to_dict(self): \"\"\" Returns the model", "Gets the errors_count of this BulkCreateResponse. The number of enrollment identities with failed", "hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr] = value return result def", "processed_count of this BulkCreateResponse. The number of enrollment identities processed until now. :return:", "= errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse. :return:", "ValueError(\"Invalid value for `object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object", "lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"):", ":param account_id: The account_id of this BulkCreateResponse. 
:type: str \"\"\" if account_id is", "@object.setter def object(self, object): \"\"\" Sets the object of this BulkCreateResponse. :param object:", "dict \"\"\" result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self,", ":return: The full_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def", "datetime \"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\" Gets the created_at of", "from an account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI", "this BulkCreateResponse. :rtype: str \"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\" Sets", "time of receiving the bulk creation task. :param created_at: The created_at of this", "self._total_count @total_count.setter def total_count(self, total_count): \"\"\" Sets the total_count of this BulkCreateResponse. Total", "BulkCreateResponse. Total number of enrollment identities found in the input CSV. :return: The", "Returns the string representation of the model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\"", "of this BulkCreateResponse. :type: int \"\"\" if processed_count is None: raise ValueError(\"Invalid value", "of this BulkCreateResponse. Total number of enrollment identities found in the input CSV.", "to an account. A device without an assigned account can be a device", "ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\" from pprint", "({0}), must be one of {1}\" .format(status, allowed_values) ) self._status = status @property", "errors_count(self, errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse. 
The number of enrollment", "for `id`, must be a follow pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id =", "__ne__(self, other): \"\"\" Returns true if both objects are not equal \"\"\" return", "Gets the total_count of this BulkCreateResponse. Total number of enrollment identities found in", "Gets the status of this BulkCreateResponse. The state of the process is 'new'", "allows users to claim the ownership of a device which is not yet", "must be one of {1}\" .format(status, allowed_values) ) self._status = status @property def", "Returns true if both objects are not equal \"\"\" return not self ==", "this BulkCreateResponse. :type: str \"\"\" if status is None: raise ValueError(\"Invalid value for", "creation. If the creation is still in progress, the state is shown as", "BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str \"\"\" self._errors_report_file =", "the model properties as a dict \"\"\" result = {} for attr, _", "elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda", "this BulkCreateResponse. Total number of enrollment identities found in the input CSV. :return:", "value for `status` ({0}), must be one of {1}\" .format(status, allowed_values) ) self._status", ":return: The completed_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def", "def __repr__(self): \"\"\" For `print` and `pprint` \"\"\" return self.to_str() def __eq__(self, other):", "result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value ))", ":param errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str \"\"\" self._errors_report_file = errors_report_file", "self._processed_count = processed_count @property def status(self): \"\"\" Gets the status of this BulkCreateResponse.", "\"\"\" Gets the created_at of this BulkCreateResponse. 
The time of receiving the bulk", "Gets the errors_report_file of this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. :rtype:", "self._completed_at = completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag", "\"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets the etag of this", "\"\"\" self._account_id = account_id self._completed_at = completed_at self._created_at = created_at self._errors_count = errors_count", "of this BulkCreateResponse. The time of receiving the bulk creation task. :return: The", "Sets the full_report_file of this BulkCreateResponse. :param full_report_file: The full_report_file of this BulkCreateResponse.", "bulk creation task. :return: The completed_at of this BulkCreateResponse. :rtype: datetime \"\"\" return", "\"\"\" Gets the id of this BulkCreateResponse. Bulk ID :return: The id of", "is None: raise ValueError(\"Invalid value for `errors_count`, must not be `None`\") self._errors_count =", "[\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError( \"Invalid value for `object` ({0}),", "lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else: result[attr]", "'str', 'total_count': 'int' } attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at',", "self._completed_at = completed_at @property def created_at(self): \"\"\" Gets the created_at of this BulkCreateResponse.", "or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property def full_report_file(self): \"\"\" Gets the", "None and not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must be a", "self._status = status self._total_count = total_count self.discriminator = None @property def account_id(self): \"\"\"", "value for `etag`, must not be `None`\") if etag is not None 
and", "this BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\" Sets", "'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' } attribute_map = { 'account_id': 'account_id',", "'id', 'object': 'object', 'processed_count': 'processed_count', 'status': 'status', 'total_count': 'total_count' } def __init__(self, account_id=None,", "{ 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str',", "device which is not yet assigned to an account. A device without an", "until now. :return: The processed_count of this BulkCreateResponse. :rtype: int \"\"\" return self._processed_count", "def completed_at(self): \"\"\" Gets the completed_at of this BulkCreateResponse. The time of completing", "The full_report_file of this BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file @property def", "of this BulkCreateResponse. :type: str \"\"\" if status is None: raise ValueError(\"Invalid value", ":type: str \"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\" Gets the id", "\"\"\" Sets the account_id of this BulkCreateResponse. ID :param account_id: The account_id of", "= None @property def account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse. ID", "state changes to 'completed'. :param status: The status of this BulkCreateResponse. :type: str", "the full_report_file of this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. :rtype: str", "CSV. :return: The total_count of this BulkCreateResponse. :rtype: int \"\"\" return self._total_count @total_count.setter", "BulkCreateResponse. 
The state of the process is 'new' at the time of creation.", "not be `None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\" Gets the errors_report_file", "None: raise ValueError(\"Invalid value for `errors_count`, must not be `None`\") self._errors_count = errors_count", "task. :return: The created_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at @created_at.setter", "None @property def account_id(self): \"\"\" Gets the account_id of this BulkCreateResponse. ID :return:", "key is attribute name and the value is attribute type. attribute_map (dict): The", "the processed_count of this BulkCreateResponse. The number of enrollment identities processed until now.", "account_id self._completed_at = completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file = errors_report_file", "the bulk creation task. :param completed_at: The completed_at of this BulkCreateResponse. :type: datetime", ":rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the created_at", ":return: The errors_count of this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count @errors_count.setter def", "must be one of {1}\" .format(object, allowed_values) ) self._object = object @property def", "manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute name and the", "be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status not in allowed_values: raise", "full_report_file self._id = id self._object = object self._processed_count = processed_count self._status = status", "The processed_count of this BulkCreateResponse. :type: int \"\"\" if processed_count is None: raise", "time of completing the bulk creation task. :param completed_at: The completed_at of this", "BulkCreateResponse. 
:rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\" Sets the", "processed_count of this BulkCreateResponse. The number of enrollment identities processed until now. :param", "completing the bulk creation task. :return: The completed_at of this BulkCreateResponse. :rtype: datetime", "processed_count @property def status(self): \"\"\" Gets the status of this BulkCreateResponse. The state", "device without an assigned account can be a device purchased from the open", "@completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse. The time", "False return self.__dict__ == other.__dict__ def __ne__(self, other): \"\"\" Returns true if both", "of completing the bulk creation task. :param completed_at: The completed_at of this BulkCreateResponse.", "'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str',", "be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError( \"Invalid", "value return result def to_str(self): \"\"\" Returns the string representation of the model", "for `errors_count`, must not be `None`\") self._errors_count = errors_count @property def errors_report_file(self): \"\"\"", "def __init__(self, account_id=None, completed_at=None, created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new',", "at the time of creation. If the creation is still in progress, the", "== other.__dict__ def __ne__(self, other): \"\"\" Returns true if both objects are not", "of enrollment identities with failed processing. :return: The errors_count of this BulkCreateResponse. :rtype:", "the id of this BulkCreateResponse. 
Bulk ID :return: The id of this BulkCreateResponse.", "self._total_count = total_count def to_dict(self): \"\"\" Returns the model properties as a dict", ":type: datetime \"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\" Gets the created_at", "total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse. Total number of enrollment identities", "ValueError( \"Invalid value for `object` ({0}), must be one of {1}\" .format(object, allowed_values)", "BulkCreateResponse. Total number of enrollment identities found in the input CSV. :param total_count:", "BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter", "id: The id of this BulkCreateResponse. :type: str \"\"\" if id is None:", "this BulkCreateResponse. Total number of enrollment identities found in the input CSV. :param", "created_at=None, errors_count=None, errors_report_file=None, etag=None, full_report_file=None, id=None, object=None, processed_count=None, status='new', total_count=None): \"\"\" BulkCreateResponse -", "\"\"\" result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr)", "Gets the full_report_file of this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. :rtype:", ")) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map(", "is None: raise ValueError(\"Invalid value for `total_count`, must not be `None`\") self._total_count =", "created_at of this BulkCreateResponse. The time of receiving the bulk creation task. :param", "completed_at of this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property def created_at(self):", "def full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse. 
:return: The full_report_file of", ":rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count): \"\"\" Sets the errors_count", "creation task. :return: The created_at of this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at", "return result def to_str(self): \"\"\" Returns the string representation of the model \"\"\"", "device transferred from an account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html)", "for `object` ({0}), must be one of {1}\" .format(object, allowed_values) ) self._object =", "Sets the errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse.", "BulkCreateResponse. Bulk ID :param id: The id of this BulkCreateResponse. :type: str \"\"\"", "other.__dict__ def __ne__(self, other): \"\"\" Returns true if both objects are not equal", "name and the value is attribute type. attribute_map (dict): The key is attribute", "str \"\"\" self._errors_report_file = errors_report_file @property def etag(self): \"\"\" Gets the etag of", "class BulkCreateResponse(object): \"\"\" NOTE: This class is auto generated by the swagger code", "errors_report_file: The errors_report_file of this BulkCreateResponse. :type: str \"\"\" self._errors_report_file = errors_report_file @property", "of this BulkCreateResponse. :type: str \"\"\" if etag is None: raise ValueError(\"Invalid value", "of this BulkCreateResponse. :type: str \"\"\" if account_id is None: raise ValueError(\"Invalid value", "time of completing the bulk creation task. :return: The completed_at of this BulkCreateResponse.", "of {1}\" .format(object, allowed_values) ) self._object = object @property def processed_count(self): \"\"\" Gets", "this BulkCreateResponse. :type: datetime \"\"\" if created_at is None: raise ValueError(\"Invalid value for", "this BulkCreateResponse. 
:type: int \"\"\" if errors_count is None: raise ValueError(\"Invalid value for", "class is auto generated by the swagger code generator program. Do not edit", "= id @property def object(self): \"\"\" Gets the object of this BulkCreateResponse. :return:", "dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else item, value.items() )) else:", "= { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime', 'errors_count': 'int', 'errors_report_file': 'str', 'etag':", "Enrollment API Mbed Cloud Connect Enrollment Service allows users to claim the ownership", "Bulk ID :param id: The id of this BulkCreateResponse. :type: str \"\"\" if", "\"\"\" if total_count is None: raise ValueError(\"Invalid value for `total_count`, must not be", ":param etag: The etag of this BulkCreateResponse. :type: str \"\"\" if etag is", "of this BulkCreateResponse. :rtype: datetime \"\"\" return self._created_at @created_at.setter def created_at(self, created_at): \"\"\"", "datetime \"\"\" if created_at is None: raise ValueError(\"Invalid value for `created_at`, must not", "self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file): \"\"\" Sets the errors_report_file of this BulkCreateResponse. :param", ":param full_report_file: The full_report_file of this BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file", "changes to 'completed'. :return: The status of this BulkCreateResponse. :rtype: str \"\"\" return", "this BulkCreateResponse. :type: datetime \"\"\" self._completed_at = completed_at @property def created_at(self): \"\"\" Gets", "failed processing. :param errors_count: The errors_count of this BulkCreateResponse. :type: int \"\"\" if", "the errors_report_file of this BulkCreateResponse. :return: The errors_report_file of this BulkCreateResponse. 
:rtype: str", "isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], \"to_dict\") else", "edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute", "status(self): \"\"\" Gets the status of this BulkCreateResponse. The state of the process", ") self._object = object @property def processed_count(self): \"\"\" Gets the processed_count of this", "and `pprint` \"\"\" return self.to_str() def __eq__(self, other): \"\"\" Returns true if both", "swagger code generator program. Do not edit the class manually. \"\"\" \"\"\" Attributes:", "object @property def processed_count(self): \"\"\" Gets the processed_count of this BulkCreateResponse. The number", "= status @property def total_count(self): \"\"\" Gets the total_count of this BulkCreateResponse. Total", "with failed processing. :return: The errors_count of this BulkCreateResponse. :rtype: int \"\"\" return", "\"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value,", "in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document. OpenAPI spec version: 3 Generated by: https://github.com/swagger-api/swagger-codegen.git \"\"\"", "errors_count of this BulkCreateResponse. :rtype: int \"\"\" return self._errors_count @errors_count.setter def errors_count(self, errors_count):", "the swagger code generator program. Do not edit the class manually. \"\"\" \"\"\"", ":rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file", "of this BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\"", "process is 'new' at the time of creation. If the creation is still", "ID :param id: The id of this BulkCreateResponse. :type: str \"\"\" if id", "BulkCreateResponse. 
:rtype: str \"\"\" return self._object @object.setter def object(self, object): \"\"\" Sets the", "self.to_str() def __eq__(self, other): \"\"\" Returns true if both objects are equal \"\"\"", "'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag', 'full_report_file': 'full_report_file', 'id':", "BulkCreateResponse. :type: str \"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\" Gets the", "\"completed\"] if status not in allowed_values: raise ValueError( \"Invalid value for `status` ({0}),", "pattern or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\" Gets", "not re.search('^[A-Za-z0-9]{32}', id): raise ValueError(\"Invalid value for `id`, must be a follow pattern", "'errors_count': 'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count':", "None: raise ValueError(\"Invalid value for `id`, must not be `None`\") if id is", "if etag is not None and not re.search('[A-Za-z0-9]{0,256}', etag): raise ValueError(\"Invalid value for", "`etag`, must not be `None`\") if etag is not None and not re.search('[A-Za-z0-9]{0,256}',", "of receiving the bulk creation task. :return: The created_at of this BulkCreateResponse. :rtype:", "of enrollment identities with failed processing. 
:param errors_count: The errors_count of this BulkCreateResponse.", "not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status not in allowed_values:", "value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] =", "\"\"\" if processed_count is None: raise ValueError(\"Invalid value for `processed_count`, must not be", "or equal to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\" Gets the", "be `None`\") self._processed_count = processed_count @property def status(self): \"\"\" Gets the status of", "'completed'. :return: The status of this BulkCreateResponse. :rtype: str \"\"\" return self._status @status.setter", "def errors_count(self, errors_count): \"\"\" Sets the errors_count of this BulkCreateResponse. The number of", "swagger_types (dict): The key is attribute name and the value is attribute type.", "the ownership of a device which is not yet assigned to an account.", "model \"\"\" return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint` \"\"\" return", ":return: The account_id of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id @account_id.setter def", "the state is shown as 'processing'. When the request has been fully processed,", "[\"new\", \"processing\", \"completed\"] if status not in allowed_values: raise ValueError( \"Invalid value for", "return pformat(self.to_dict()) def __repr__(self): \"\"\" For `print` and `pprint` \"\"\" return self.to_str() def", ".format(object, allowed_values) ) self._object = object @property def processed_count(self): \"\"\" Gets the processed_count", "transferred from an account to another. More information in [Device ownership: First-to-claim](https://cloud.mbed.com/docs/current/connecting/device-ownership.html) document.", "properties as a dict \"\"\" result = {} for attr, _ in iteritems(self.swagger_types):", "errors_count of this BulkCreateResponse. 
:type: int \"\"\" if errors_count is None: raise ValueError(\"Invalid", "BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse. :rtype: str \"\"\" return self._account_id", "full_report_file(self): \"\"\" Gets the full_report_file of this BulkCreateResponse. :return: The full_report_file of this", "etag of this BulkCreateResponse. etag :param etag: The etag of this BulkCreateResponse. :type:", "this BulkCreateResponse. ID :param account_id: The account_id of this BulkCreateResponse. :type: str \"\"\"", "allowed_values: raise ValueError( \"Invalid value for `status` ({0}), must be one of {1}\"", "\"\"\" \"\"\" Attributes: swagger_types (dict): The key is attribute name and the value", "BulkCreateResponse. :rtype: datetime \"\"\" return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the", "the etag of this BulkCreateResponse. etag :return: The etag of this BulkCreateResponse. :rtype:", "attribute type. attribute_map (dict): The key is attribute name and the value is", "this BulkCreateResponse. :return: The full_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file", "the total_count of this BulkCreateResponse. Total number of enrollment identities found in the", "value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1],", "for `etag`, must not be `None`\") if etag is not None and not", "`status`, must not be `None`\") allowed_values = [\"new\", \"processing\", \"completed\"] if status not", "value for `etag`, must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag", "pprint import pformat from six import iteritems import re class BulkCreateResponse(object): \"\"\" NOTE:", "'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int', 'status': 'str', 'total_count': 'int' }", "BulkCreateResponse. 
:rtype: str \"\"\" return self._etag @etag.setter def etag(self, etag): \"\"\" Sets the", "raise ValueError( \"Invalid value for `object` ({0}), must be one of {1}\" .format(object,", "@full_report_file.setter def full_report_file(self, full_report_file): \"\"\" Sets the full_report_file of this BulkCreateResponse. :param full_report_file:", "the errors_report_file of this BulkCreateResponse. :param errors_report_file: The errors_report_file of this BulkCreateResponse. :type:", "is auto generated by the swagger code generator program. Do not edit the", "= completed_at self._created_at = created_at self._errors_count = errors_count self._errors_report_file = errors_report_file self._etag =", "of this BulkCreateResponse. Bulk ID :param id: The id of this BulkCreateResponse. :type:", "must not be `None`\") self._processed_count = processed_count @property def status(self): \"\"\" Gets the", "\"\"\" if status is None: raise ValueError(\"Invalid value for `status`, must not be", "self._etag = etag self._full_report_file = full_report_file self._id = id self._object = object self._processed_count", "id(self): \"\"\" Gets the id of this BulkCreateResponse. Bulk ID :return: The id", "\"processing\", \"completed\"] if status not in allowed_values: raise ValueError( \"Invalid value for `status`", "completed_at(self, completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse. The time of completing", "Returns true if both objects are equal \"\"\" if not isinstance(other, BulkCreateResponse): return", "self._object = object self._processed_count = processed_count self._status = status self._total_count = total_count self.discriminator", "status of this BulkCreateResponse. The state of the process is 'new' at the", "BulkCreateResponse. :type: int \"\"\" if total_count is None: raise ValueError(\"Invalid value for `total_count`,", "account_id of this BulkCreateResponse. ID :return: The account_id of this BulkCreateResponse. 
:rtype: str", "`object`, must not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values:", "return self._completed_at @completed_at.setter def completed_at(self, completed_at): \"\"\" Sets the completed_at of this BulkCreateResponse.", "assigned to an account. A device without an assigned account can be a", "auto generated by the swagger code generator program. Do not edit the class", "to `/^[A-Za-z0-9]{32}/`\") self._id = id @property def object(self): \"\"\" Gets the object of", "not be `None`\") allowed_values = [\"enrollment-identity-bulk-uploads\"] if object not in allowed_values: raise ValueError(", "@processed_count.setter def processed_count(self, processed_count): \"\"\" Sets the processed_count of this BulkCreateResponse. The number", "of this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file): \"\"\"", "\"\"\" if account_id is None: raise ValueError(\"Invalid value for `account_id`, must not be", "value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict()", "generator program. Do not edit the class manually. \"\"\" \"\"\" Attributes: swagger_types (dict):", "not be `None`\") self._account_id = account_id @property def completed_at(self): \"\"\" Gets the completed_at", "self._errors_report_file = errors_report_file self._etag = etag self._full_report_file = full_report_file self._id = id self._object", "def errors_report_file(self): \"\"\" Gets the errors_report_file of this BulkCreateResponse. :return: The errors_report_file of", "full_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._full_report_file @full_report_file.setter def full_report_file(self, full_report_file):", "BulkCreateResponse. The time of receiving the bulk creation task. 
:return: The created_at of", "= list(map( lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value )) elif", "\"\"\" Returns the model properties as a dict \"\"\" result = {} for", "errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self, errors_report_file):", "`None`\") self._processed_count = processed_count @property def status(self): \"\"\" Gets the status of this", "etag): \"\"\" Sets the etag of this BulkCreateResponse. etag :param etag: The etag", "must be a follow pattern or equal to `/[A-Za-z0-9]{0,256}/`\") self._etag = etag @property", "BulkCreateResponse. :rtype: str \"\"\" return self._id @id.setter def id(self, id): \"\"\" Sets the", "key in definition. \"\"\" swagger_types = { 'account_id': 'str', 'completed_at': 'datetime', 'created_at': 'datetime',", "The errors_report_file of this BulkCreateResponse. :rtype: str \"\"\" return self._errors_report_file @errors_report_file.setter def errors_report_file(self,", "\"\"\" from pprint import pformat from six import iteritems import re class BulkCreateResponse(object):", "`None`\") self._account_id = account_id @property def completed_at(self): \"\"\" Gets the completed_at of this", "object not in allowed_values: raise ValueError( \"Invalid value for `object` ({0}), must be", "in progress, the state is shown as 'processing'. When the request has been", "{ 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count', 'errors_report_file': 'errors_report_file', 'etag': 'etag',", "if hasattr(x, \"to_dict\") else x, value )) elif hasattr(value, \"to_dict\"): result[attr] = value.to_dict()", "to claim the ownership of a device which is not yet assigned to", "this BulkCreateResponse. etag :return: The etag of this BulkCreateResponse. 
:rtype: str \"\"\" return", "'int' } attribute_map = { 'account_id': 'account_id', 'completed_at': 'completed_at', 'created_at': 'created_at', 'errors_count': 'errors_count',", "\"\"\" self._full_report_file = full_report_file @property def id(self): \"\"\" Gets the id of this", "def to_dict(self): \"\"\" Returns the model properties as a dict \"\"\" result =", "'int', 'errors_report_file': 'str', 'etag': 'str', 'full_report_file': 'str', 'id': 'str', 'object': 'str', 'processed_count': 'int'," ]
[ "torch import torch.nn as nn from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16':", "-> 14,14,512 -> 7,7,512 cfgs = { 'D': [64, 64, 'M', 128, 128,", "if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential(", "nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x): x =", "def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if", "= features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 *", "-> 7,7,512 cfgs = { 'D': [64, 64, 'M', 128, 128, 'M', 256,", "cfgs = { 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256,", "nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x):", "nn from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module):", "batch_norm=False): layers = [] in_channels = 3 for v in cfg: if v", "load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000,", "param in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers = [] in_channels", "features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier", "nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def", "nn.ReLU(True), 
nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def", "elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in", "self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True", "def make_layers(cfg, batch_norm=False): layers = [] in_channels = 3 for v in cfg:", "isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif", "224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512", "-> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 ->", "= nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True),", "nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad =", "nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers", "None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear):", "224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256", "in_channels = v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128", "nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(),", "112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512", "VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', 
progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier =", "kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d,", "128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512,", "for param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param in self.features.parameters():", "num_classes), ) if init_weights: self._initialize_weights() def forward(self, x): x = self.features(x) x =", "} def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'],", "cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels,", "v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers +=", "torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self,", "nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self):", "in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers = [] in_channels =", "nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 ->", "+= [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return", "def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data',", "init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) 
self.classifier = nn.Sequential(", "1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self):", "batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels =", "class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool", "progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096),", "stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d,", "if init_weights: self._initialize_weights() def forward(self, x): x = self.features(x) x = self.avgpool(x) x", "self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x", "nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m,", "'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features", "512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained:", "'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict =", "def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7,", "v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128", "-> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 #", "256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 
'M'], } def", "+= [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers", "progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False)", "self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers = [] in_channels = 3", "as nn from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class", "Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers =", "mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight,", "== 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)", "__init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7))", "torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self): for m in self.modules():", "self.classifier(x) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight,", "x): x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x =", "[conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64", "[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512,", "256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False,", "for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', 
nonlinearity='relu') if m.bias is", "param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad", "0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0,", "torch.nn as nn from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', }", "-> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs", "_initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias", "56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512", "layers = [] in_channels = 3 for v in cfg: if v ==", "self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096),", "{ 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512,", "pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512", "model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True),", "= 3 for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2,", "init_weights: self._initialize_weights() def forward(self, x): x = self.features(x) x = self.avgpool(x) x =", "in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad =", "forward(self, x): x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x", "= v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 ->", "* 7 * 
7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes),", "x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x)", "model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True):", "return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out',", "0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param", "= [] in_channels = 3 for v in cfg: if v == 'M':", "return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 ->", "512, 'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model =", "self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096),", "num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096,", "-> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs = {", "= { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG,", "} class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features", "conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]", "* 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if", "nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def 
forward(self,", "import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features,", "[conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers)", "nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for", "False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False):", "= nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else:", "layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64", "{ 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__()", "= self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return", "else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v),", "x = torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self): for m", "if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels", "64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M',", "model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096),", "layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm:", "128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512,", "7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), 
nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights:", "param.requires_grad = False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True def", "-> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs = { 'D': [64,", "'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True,", "0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False", "x = self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def", "* 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) return", "True def make_layers(cfg, batch_norm=False): layers = [] in_channels = 3 for v in", "7,7,512 cfgs = { 'D': [64, 64, 'M', 128, 128, 'M', 256, 256,", "0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for", "nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad", "= self.classifier(x) return x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d):", "= nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True),", "'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512,", "param.requires_grad = True def make_layers(cfg, batch_norm=False): layers = [] in_channels = 3 for", "v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d", "1) x = self.classifier(x) return x def _initialize_weights(self): for m in self.modules(): if", "= { 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',", "512, 512, 'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model", "x = self.classifier(x) return x def 
_initialize_weights(self): for m in self.modules(): if isinstance(m,", "nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x): x = self.features(x)", "nn.Sequential(*layers) # 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256", "num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if", "nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x): x = self.features(x) x", "if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias,", "self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self): for", "'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features =", "num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier =", "512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if", "not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m,", "for param in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers = []", "'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512,", "28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs = { 'D': [64, 64,", "7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) return model", "nn.init.constant_(m.bias, 0) elif 
isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight,", "import torch.nn as nn from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',", "nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(),", "nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096,", "in_channels = 3 for v in cfg: if v == 'M': layers +=", "3 for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)]", "'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],", "# 224,224,3 -> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 ->", "if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(),", "import torch import torch.nn as nn from torchvision.models.utils import load_state_dict_from_url model_urls = {", "def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True def make_layers(cfg, batch_norm=False): layers", "28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs = { 'D':", "padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)]", "is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif", "def forward(self, x): x = self.features(x) x = self.avgpool(x) x = torch.flatten(x, 1)", "layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v", "self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias,", "= 
torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self): for m in", "features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7,", "freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param in", "if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v,", "512, 512, 512, 'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000):", "nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d):", "56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 -> 7,7,512 cfgs =", "0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False def", "make_layers(cfg, batch_norm=False): layers = [] in_channels = 3 for v in cfg: if", "if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0)", "112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512", "= False def Unfreeze_backbone(self): for param in self.features.parameters(): param.requires_grad = True def make_layers(cfg,", "14,14,512 -> 7,7,512 cfgs = { 'D': [64, 64, 'M', 128, 128, 'M',", "4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights()", "nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1)", ") if init_weights: self._initialize_weights() def forward(self, x): x = self.features(x) x = self.avgpool(x)", "= True def make_layers(cfg, batch_norm=False): layers = [] in_channels = 3 for v", "[] in_channels = 3 for v 
in cfg: if v == 'M': layers", "+= [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3 -> 224,224,64 ->", "512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict", "-> 224,224,64 -> 112,112,64 -> 112,112,128 -> 56,56,128 -> 56,56,256 -> 28,28,256 ->", "def freeze_backbone(self): for param in self.features.parameters(): param.requires_grad = False def Unfreeze_backbone(self): for param", "isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def freeze_backbone(self): for param in self.features.parameters():", "isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias,", "nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3", "-> 56,56,128 -> 56,56,256 -> 28,28,256 -> 28,28,512 # 14,14,512 -> 14,14,512 ->", "'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if", "256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], }", "= VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier", "m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0)", "from torchvision.models.utils import load_state_dict_from_url model_urls = { 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', } class VGG(nn.Module): def", "model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) 
model.load_state_dict(state_dict,strict=False) if num_classes!=1000:", "state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 *", "4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), ) if init_weights: self._initialize_weights() def forward(self, x): x", "vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D'])) if pretrained: state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress)", "self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 *", "14,14,512 -> 14,14,512 -> 7,7,512 cfgs = { 'D': [64, 64, 'M', 128,", "self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier = nn.Sequential( nn.Linear(512 * 7", "in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None:", "7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes), )", "elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01)", "model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 * 7,", "nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) #", "7)) self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096,", "[nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers +=", "= 
self.avgpool(x) x = torch.flatten(x, 1) x = self.classifier(x) return x def _initialize_weights(self):", "v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3,", "x def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')", "self._initialize_weights() def forward(self, x): x = self.features(x) x = self.avgpool(x) x = torch.flatten(x,", "in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d =", "else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v return nn.Sequential(*layers) # 224,224,3 ->", "load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7 *", "nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0)", "m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not", "for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else:", "VGG(nn.Module): def __init__(self, features, num_classes=1000, init_weights=True): super(VGG, self).__init__() self.features = features self.avgpool =", "'M', 512, 512, 512, 'M'], } def vgg16(pretrained=False, progress=True, num_classes=1000): model = VGG(make_layers(cfgs['D']))", "= load_state_dict_from_url(model_urls['vgg16'], model_dir='./model_data', progress=progress) model.load_state_dict(state_dict,strict=False) if num_classes!=1000: model.classifier = nn.Sequential( nn.Linear(512 * 7", "# 14,14,512 -> 14,14,512 -> 7,7,512 cfgs = { 'D': [64, 64, 'M',", "super(VGG, self).__init__() self.features = features self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) self.classifier 
= nn.Sequential( nn.Linear(512" ]
[ "based on the status of previous operators task.trigger_rule = 'dummy' task.render_template_fields(task.generate_context()) task.pre_execute(context) task.execute(context)", "exclude_cols = exclude_cols or [] e_cols = list(task.exclude_columns) + exclude_cols actual = actual.drop(e_cols,", "isinstance(actual, tuple): # pylint: disable=no-member actual = petl.wrap(actual).todataframe() exclude_cols = exclude_cols or []", "dag: TDag, task_id: str, test_datasets: dict, mocker: MockerFixture, exclude_cols: list = None ):", "context) return task.read_result(context) def run_and_assert_task( task: Union[TBaseOperator, BaseOperator], dataset: dict[str, Any], mocker: MockerFixture", "e_cols = list(task.exclude_columns) + exclude_cols actual = actual.drop(e_cols, axis=1, errors='ignore') expected = expected.drop(e_cols,", "import MockerFixture from data_detective_airflow.dag_generator import TDag from data_detective_airflow.operators.tbaseoperator import TBaseOperator from data_detective_airflow.test_utilities.assertions import", "will be cleared first, and then after the launch, compare the contents of", "Id of the running task :param test_datasets: Dictionary with examples :param exclude_cols: Columns", "Union import petl from airflow.models import BaseOperator, TaskInstance from airflow.utils import timezone from", "TDag :param task_id: Id of the running task :param test_datasets: Dictionary with examples", "and if it is TBaseOperator then get the result and compare it with", "run_and_assert_task( task: Union[TBaseOperator, BaseOperator], dataset: dict[str, Any], mocker: MockerFixture = None, exclude_cols: list", ":param test_datasets: Dictionary with examples :param exclude_cols: Columns excluded from comparison :param mocker:", "= list(task.exclude_columns) + exclude_cols actual = actual.drop(e_cols, axis=1, errors='ignore') expected = expected.drop(e_cols, axis=1,", "excluded from comparison :param mocker: MockerFixture fixture \"\"\" task: TBaseOperator = 
def run_task(task: Union[TBaseOperator, BaseOperator], context: dict = None):
    """Run a single task outside of a scheduler run.

    :param task: Operator instance to execute
    :param context: Airflow task context forwarded to the execute hooks
    """
    # Removing launch restrictions based on the status of previous operators,
    # so the task fires regardless of upstream task states.
    task.trigger_rule = 'dummy'
    # Resolve templated fields first, as the scheduler normally would.
    task.render_template_fields(task.generate_context())
    # Drive the standard operator lifecycle manually; the order matters.
    task.pre_execute(context)
    task.execute(context)
    task.post_execute(context)
def mock_task_inputs(task, dataset, mocker):
    """Stub every upstream result so reads return the example datasets.

    Each upstream operator's ``result.read`` is replaced with a mock whose
    return value is the entry of *dataset* keyed by that operator's task id.
    """
    for upstream in task.upstream_list:
        upstream.result.read = mocker.MagicMock(return_value=dataset[upstream.task_id])
def run_and_read(task: Union[TBaseOperator, BaseOperator], context: dict = None) -> DataFrame:
    """Run the task and return the DataFrame from the BaseResult instance.

    :param task: Operator instance to execute
    :param context: Airflow task context forwarded to the execute hooks
    :return: Whatever ``task.read_result(context)`` yields
    """
    # Lazy %-style args: the message is only rendered if INFO is enabled,
    # unlike the previous eager f-string.
    logging.info('Running task %s', task.task_id)
    run_task(task, context)
    return task.read_result(context)
def run_and_assert_task(
        task: Union[TBaseOperator, BaseOperator],
        dataset: dict[str, Any],
        mocker: MockerFixture = None,
        exclude_cols: list = None,
        **kwargs
):
    """Execute *task* and compare its result with the example dataset.

    :param task: Id of the running task
    :param dataset: Dictionary with comparison examples; both the input
        and the output examples must be present
    :param exclude_cols: Columns excluded from comparison
    :param mocker: MockerFixture fixture used to stub upstream results
    :param kwargs: Extra options forwarded to ``assert_frame_equal``
    """
    instance = TaskInstance(task=task, execution_date=timezone.utcnow())
    context = instance.get_template_context()
    if mocker and dataset:
        mock_task_inputs(task, dataset, mocker)

    actual = run_and_read(task=task, context=context)
    if actual is None:
        return

    expected = dataset[task.task_id]
    if not isinstance(expected, DataFrame):
        assert actual == expected
        return

    if task.include_columns:
        kept = list(task.include_columns)
        actual = actual[kept]
        expected = expected[kept]
    if isinstance(actual, tuple):  # pylint: disable=no-member
        actual = petl.wrap(actual).todataframe()

    dropped = list(task.exclude_columns) + (exclude_cols or [])
    actual = actual.drop(dropped, axis=1, errors='ignore')
    expected = expected.drop(dropped, axis=1, errors='ignore')
    assert_frame_equal(actual, expected, **kwargs)
def run_and_assert(
        dag: TDag,
        task_id: str,
        test_datasets: dict,
        mocker: MockerFixture,
        exclude_cols: list = None
):  # pylint: disable=inconsistent-return-statements
    """Look up a task in the DAG and verify it via :func:`run_and_assert_task`.

    Run the task and, if it is a TBaseOperator, read its result and compare
    it with the example. Also, if the task is PgReplacePartitions, the target
    table is cleared first and, after the launch, the contents of the target
    table are compared with the example.

    :param dag: TDag
    :param task_id: Id of the running task
    :param test_datasets: Dictionary with examples
    :param exclude_cols: Columns excluded from comparison
    :param mocker: MockerFixture fixture
    """
    run_and_assert_task(
        dag.task_dict[task_id],
        dataset=test_datasets,
        mocker=mocker,
        exclude_cols=exclude_cols,
    )
Id of the", "'dummy' task.render_template_fields(task.generate_context()) task.pre_execute(context) task.execute(context) task.post_execute(context) def mock_task_inputs(task, dataset, mocker): for i, uptask in", "from airflow.utils import timezone from pandas import DataFrame from pytest_mock.plugin import MockerFixture from", "mocker.MagicMock(return_value=dataset[uptask.task_id]) def run_and_read(task: Union[TBaseOperator, BaseOperator], context: dict = None) -> DataFrame: \"\"\"Run the", "expected.drop(e_cols, axis=1, errors='ignore') assert_frame_equal(actual, expected, **kwargs) else: assert actual == expected def run_and_assert(", "task_instance = TaskInstance(task=task, execution_date=timezone.utcnow()) context = task_instance.get_template_context() if mocker and dataset: mock_task_inputs(task, dataset,", "it is TBaseOperator then get the result and compare it with the example", "data_detective_airflow.operators.tbaseoperator import TBaseOperator from data_detective_airflow.test_utilities.assertions import assert_frame_equal def run_task(task: Union[TBaseOperator, BaseOperator], context: dict", "\"\"\"Helper for creating DAG tests \"\"\" import logging from typing import Any, Union" ]
[ "@app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user =", ") class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request,", "\"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app = settings.app # 初始化permission if", "from datetime import timedelta from fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from", "{\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app = settings.app", "tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\"", "username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L): user", "= await authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or", "detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token(", "\"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data:", "HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\":", "or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return", "{\"request\": request, }, ) class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", 
summary=\"登录\") async", "Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param", "starlette import status from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends", "= False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\"", "= await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or", "fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from", "from fastapi import Depends, HTTPException from pydantic import BaseModel from starlette import status", "import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from pydantic", "response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await", "import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter from", "password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L): user = await", "password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\":", "form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"},", "Permission.all())) INIT_PERMISSION = True permissions = await user.perms site = 
get_site_from_permissionschema(app.site_schema, permissions, \"\",", "user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk},", "def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION", "data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def", "user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site] else: return", "from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from fast_tmp.amis_router import", "create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"])", "fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError from", "@app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password)", "获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION app = settings.app # 初始化permission if", "User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION app =", "INIT_PERMISSION = True permissions = await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser)", "fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings from", "from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES", 
"import templates from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA", "INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions = await user.perms site", "status from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user", "= settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async", "return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app =", "headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token})", "timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token,", "user: :return: \"\"\" global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION:", "access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user =", "return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username: str password: <PASSWORD>", "import Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings from fast_tmp.depends", "expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION", "Depends, HTTPException from pydantic import BaseModel from starlette import status from 
starlette.requests import", "password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\":", "detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA)", "permissions, \"\", user.is_superuser) if site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async", "import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from pydantic import BaseModel from starlette", "from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import templates", "return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request,", "create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user:", "get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\": [site]} else: return {\"pages\": []}", "import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models", "from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission", "import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import", "not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = 
create_access_token( data={\"sub\": user.username, \"id\":", "from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app =", "async def get_pages(user: User): global INIT_PERMISSION app = settings.app # 初始化permission if not", "import BaseModel from starlette import status from starlette.requests import Request from fast_tmp.apps.api.schemas import", "not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions = await user.perms", "user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR):", "fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from", "from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION", ") return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口", "authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import", "import status from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import", "site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return", "timedelta from fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends,", "Success, LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from fastapi.responses import", "# 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await 
Permission.all())) INIT_PERMISSION = True permissions", "init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions = await user.perms site = get_site_from_permissionschema(app.site_schema,", "index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username: str", "not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires", "\"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User):", "INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all()))", ") access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\")", "= timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\":", "{\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user", "async def index(request: Request, u: L): user = await authenticate_user(u.username, u.password) if not", "await authenticate_user(form_data.username, form_data.password) if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token =", ") access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires )", "Form from fastapi.security import OAuth2PasswordRequestForm from 
fastapi import Depends, HTTPException from pydantic import", "= True permissions = await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if", "Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema,", "def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not", "u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"},", "from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from", "fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from", "fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES =", "<PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L): user = await authenticate_user(u.username,", "LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import", "Request, u: L): user = await authenticate_user(u.username, u.password) if not user: raise HTTPException(", "import timedelta from fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi import", "async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global", "仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED,", "[] @app.get(\"/index\", 
summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, },", "return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏", "import Success, LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from fastapi.responses", "LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise", "from fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from pydantic import BaseModel", "\"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect", "not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token", "user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def", "summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class", "def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username:", "get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION app", "from fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException", "User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user", "\"token_type\": \"bearer\"} @app.post(\"/get-token\") 
async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await", "app = settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION", "from pydantic import BaseModel from starlette import status from starlette.requests import Request from", "import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from", "raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token(", "from fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission,", "= await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\":", "fast_tmp.depends import get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User", "fastapi.security import OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from pydantic import BaseModel from", "\"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username,", "permissions = await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return", "create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\")", "user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\": [site]} else:", "HTTPException from pydantic import BaseModel from starlette 
import status from starlette.requests import Request", "login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if", "data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async", "LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse", "= await authenticate_user(form_data.username, form_data.password) if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token", "templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\",", "user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires =", "datetime import timedelta from fastapi import Form from fastapi.security import OAuth2PasswordRequestForm from fastapi", "async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username,", "True permissions = await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site:", "import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app", "username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\":", "= Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION app = settings.app", "init_permission from 
fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import", "summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: \"\"\"", "get_current_active_user from fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from fast_tmp.amis_router", "u: L): user = await authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED,", "= Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user:", "get_pages(user: User): global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await", "data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User", "fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION =", "expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\"", "from starlette import status from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from", "user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global", "await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\": [site]}", "pydantic import BaseModel from starlette import status from starlette.requests import Request from fast_tmp.apps.api.schemas", "False @app.post(\"/token\", response_class=JSONResponse) async def 
login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user", "settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True", "= AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm", "from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from", "headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk},", "\"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\",", "AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm =", ") return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app", "user.is_superuser) if site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request:", "from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError", "login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user:", "status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username},", "Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return: 
\"\"\" global INIT_PERMISSION app = settings.app #", "index(request: Request, u: L): user = await authenticate_user(u.username, u.password) if not user: raise", "app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data:", "fast_tmp.func import get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter", "access_token, \"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app = settings.app #", "\"token_type\": \"bearer\"} async def get_pages(user: User): global INIT_PERMISSION app = settings.app # 初始化permission", "LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires )", "else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\":", "get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\")", "or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username,", "= get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site] else: return [] @app.get(\"/index\",", "[site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\",", "status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) 
access_token =", "access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async", "@app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L): user = await authenticate_user(u.username, u.password)", ":return: \"\"\" global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await", "user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async def get_pages(user:", "L): user = await authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect", "User): global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema,", "templates from fast_tmp.utils.token import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app", "if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username,", "@app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user: :return:", "\"\"\" 获取左侧导航栏 :param user: :return: \"\"\" global INIT_PERMISSION app = settings.app # 初始化permission", "site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\": [site]} else: return", "AmisRouter from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success,", "form_data.password) if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\":", "str 
password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L): user =", "request, }, ) class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def", "import AmisRouter from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import", "= settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION =", "HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token", "from fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings", "prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()):", "from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import templates from fast_tmp.utils.token import create_access_token", "Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise", "access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\":", "access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\" 获取左侧导航栏 :param user:", "await authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\",", "if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions = await", "= await user.perms site = 
get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site]", "async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel):", "fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings from fast_tmp.depends import authenticate_user from fast_tmp.responses", "user = await authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username", "= create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"}", "authenticate_user(form_data.username, form_data.password) if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token(", "summary=\"登录\") async def index(request: Request, u: L): user = await authenticate_user(u.username, u.password) if", "await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\",", "list(await Permission.all())) INIT_PERMISSION = True permissions = await user.perms site = get_site_from_permissionschema(app.site_schema, permissions,", "}, ) class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request:", "import Depends, HTTPException from pydantic import BaseModel from starlette import status from starlette.requests", "OAuth2PasswordRequestForm from fastapi import Depends, HTTPException from pydantic import BaseModel from starlette import", "return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse(", "fast_tmp.utils.token import create_access_token from fastapi.responses import 
JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\",", "if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, )", "L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u: L):", "INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用", "user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token =", "def login(form_data: OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password)", "= get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return {\"pages\": [site]} else: return {\"pages\":", "global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await", "site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site] else: return []", "if site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def index(request: Request):", "BaseModel from starlette import status from starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR", "starlette.requests import Request from fast_tmp.apps.api.schemas import LoginR from fast_tmp.depends import get_current_active_user from fast_tmp.func", ") return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)): \"\"\"", "Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf import settings 
from fast_tmp.depends import", "Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username: str password:", "raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)", "settings from fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import", "await user.perms site = get_site_from_permissionschema(app.site_schema, permissions, \"\", user.is_superuser) if site: return [site] else:", "authenticate_user(u.username, u.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\":", "fast_tmp.depends import authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import templates from", "标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise LoginError() access_token_expires", "return {\"access_token\": access_token, \"token_type\": \"bearer\"} @app.post(\"/get-token\") async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\"", "create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return {\"access_token\": access_token, \"token_type\": \"bearer\"} async", "ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse)", "\"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException(", "import create_access_token from fastapi.responses import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = 
AmisRouter(title=\"fast_tmp\", prefix=\"/auth\",", "\"gh-pages/index.html\", {\"request\": request, }, ) class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\")", "JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\",", "access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires ) return", "raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires", "fastapi import Depends, HTTPException from pydantic import BaseModel from starlette import status from", "def get_pages(user: User): global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION:", "\"\"\" global INIT_PERMISSION app = settings.app # 初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema,", "\"Bearer\"}, ) access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES) access_token = create_access_token( data={\"sub\": user.username, \"id\": user.pk}, expires_delta=access_token_expires", "def index(request: Request, u: L): user = await authenticate_user(u.username, u.password) if not user:", "user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User =", "OAuth2PasswordRequestForm = Depends()): \"\"\" 仅用于docs页面测试返回用 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not", "\"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise LoginError() access_token_expires =", "user = await authenticate_user(form_data.username, 
form_data.password) if not user: raise LoginError() access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)", ":param user: :return: \"\"\" global INIT_PERMISSION app = settings.app # 初始化permission if not", "username or password\", headers={\"WWW-Authenticate\": \"Bearer\"}, ) access_token = create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) )", "expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def get_site(user: User = Depends(get_current_active_user)):", "await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions = await user.perms site =", "初始化permission if not INIT_PERMISSION: await init_permission(app.site_schema, list(await Permission.all())) INIT_PERMISSION = True permissions =", "settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False @app.post(\"/token\", response_class=JSONResponse) async def", "\"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if not user: raise LoginError()", "\"\", user.is_superuser) if site: return [site] else: return [] @app.get(\"/index\", summary=\"主页面\") async def", "authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username or password\", headers={\"WWW-Authenticate\":", "@app.get(\"/index\", summary=\"主页面\") async def index(request: Request): return templates.TemplateResponse( \"gh-pages/index.html\", {\"request\": request, }, )", "get_site_from_permissionschema, init_permission from fast_tmp.models import Permission, User from fast_tmp.amis_router import AmisRouter from fast_tmp.conf", "import authenticate_user from fast_tmp.responses import Success, LoginError from fast_tmp.templates_app import 
templates from fast_tmp.utils.token", "class L(BaseModel): username: str password: <PASSWORD> @app.post(\"/index\", summary=\"登录\") async def index(request: Request, u:", "= create_access_token( data={\"sub\": user.username}, expires_delta=timedelta(minutes=settings.EXPIRES_DELTA) ) return Success(data={\"access_token\": access_token}) @app.get(\"/site\", summary=\"获取目录\") async def", "import JSONResponse ACCESS_TOKEN_EXPIRE_MINUTES = settings.EXPIRES_DELTA app = AmisRouter(title=\"fast_tmp\", prefix=\"/auth\", tags=[\"auth\"]) INIT_PERMISSION = False", "user = await authenticate_user(form_data.username, form_data.password) if not user: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail=\"Incorrect username", "async def login(form_data: LoginR): \"\"\" 标准的请求接口 \"\"\" user = await authenticate_user(form_data.username, form_data.password) if" ]
[ "#fraction of entire training period over which the exploration rate is annealed exploration_final_eps=0.02", "buffer_size = 50000 #size of the replay buffer exploration_fraction=0.1 #fraction of entire training", "= gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq,", "#size of the replay buffer exploration_fraction=0.1 #fraction of entire training period over which", "the exploration rate is annealed exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0", "optimizer buffer_size = 50000 #size of the replay buffer exploration_fraction=0.1 #fraction of entire", "learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") #", "batched sampled from replay buffer for training double_q=True #whether to enable Double-Q learning", "before learning starts timesteps = 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model", "random action probability train_freq=1 #update the model every train_freq steps. set to None", "env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps)", "every train_freq steps. set to None to disable printing batch_size=32 #size of a", "learning or not. 
learning_starts=100 #how many steps of the model to collect transitions", "is annealed exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0 #initial value of", "learning_starts=100 #how many steps of the model to collect transitions for before learning", "stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def run():", "of random action probability train_freq=1 #update the model every train_freq steps. set to", "exploration_initial_eps=1.0 #initial value of random action probability train_freq=1 #update the model every train_freq", "steps of the model to collect transitions for before learning starts timesteps =", "to enable Double-Q learning or not. learning_starts=100 #how many steps of the model", "printing batch_size=32 #size of a batched sampled from replay buffer for training double_q=True", "set to None to disable printing batch_size=32 #size of a batched sampled from", "Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs)", "from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def run(): # hyperparameters gamma", "of the model to collect transitions for before learning starts timesteps = 1000#2000", "#initial value of random action probability train_freq=1 #update the model every train_freq steps.", "many steps of the model to collect transitions for before learning starts timesteps", "enable Double-Q learning or not. 
learning_starts=100 #how many steps of the model to", "gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\")", "sampled from replay buffer for training double_q=True #whether to enable Double-Q learning or", "#update the model every train_freq steps. set to None to disable printing batch_size=32", "import DQN def run(): # hyperparameters gamma = 0.99 #discount factor learning_rate =", "model.predict(obs) obs, rewards, done, info = env.step(action) env.render() if __name__ == \"__main__\": run()", "def run(): # hyperparameters gamma = 0.99 #discount factor learning_rate = 0.00025 #learning", "annealed exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0 #initial value of random", "buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy", "from replay buffer for training double_q=True #whether to enable Double-Q learning or not.", "buffer exploration_fraction=0.1 #fraction of entire training period over which the exploration rate is", "import gym_sokoban from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import", "# hyperparameters gamma = 0.99 #discount factor learning_rate = 0.00025 #learning rate for", "double_q=True #whether to enable Double-Q learning or not. 
learning_starts=100 #how many steps of", "#size of a batched sampled from replay buffer for training double_q=True #whether to", "for adam optimizer buffer_size = 50000 #size of the replay buffer exploration_fraction=0.1 #fraction", "probability exploration_initial_eps=1.0 #initial value of random action probability train_freq=1 #update the model every", "trained agent obs = env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs,", "exploration_fraction=0.1 #fraction of entire training period over which the exploration rate is annealed", "of the replay buffer exploration_fraction=0.1 #fraction of entire training period over which the", "the replay buffer exploration_fraction=0.1 #fraction of entire training period over which the exploration", "random action probability exploration_initial_eps=1.0 #initial value of random action probability train_freq=1 #update the", "= env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs, rewards, done, info", "stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def run(): # hyperparameters gamma =", "a batched sampled from replay buffer for training double_q=True #whether to enable Double-Q", "exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent", "model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while True: action,", "the model every train_freq steps. 
set to None to disable printing batch_size=32 #size", "1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate,", "= 0.00025 #learning rate for adam optimizer buffer_size = 50000 #size of the", "exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs", "while True: action, _states = model.predict(obs) obs, rewards, done, info = env.step(action) env.render()", "to collect transitions for before learning starts timesteps = 1000#2000 verbose = 1", "timesteps = 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env,", "run(): # hyperparameters gamma = 0.99 #discount factor learning_rate = 0.00025 #learning rate", "#how many steps of the model to collect transitions for before learning starts", "stable_baselines import DQN def run(): # hyperparameters gamma = 0.99 #discount factor learning_rate", "over which the exploration rate is annealed exploration_final_eps=0.02 #final value of random action", "replay buffer for training double_q=True #whether to enable Double-Q learning or not. learning_starts=100", "= 0.99 #discount factor learning_rate = 0.00025 #learning rate for adam optimizer buffer_size", "exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0 #initial value of random action", "= 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma,", "verbose = 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size,", "buffer for training double_q=True #whether to enable Double-Q learning or not. 
learning_starts=100 #how", "50000 #size of the replay buffer exploration_fraction=0.1 #fraction of entire training period over", "exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained", "True: action, _states = model.predict(obs) obs, rewards, done, info = env.step(action) env.render() if", "period over which the exploration rate is annealed exploration_final_eps=0.02 #final value of random", "of random action probability exploration_initial_eps=1.0 #initial value of random action probability train_freq=1 #update", "env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs, rewards, done, info =", "not. learning_starts=100 #how many steps of the model to collect transitions for before", "import MlpPolicy from stable_baselines import DQN def run(): # hyperparameters gamma = 0.99", "learning starts timesteps = 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model =", "0.00025 #learning rate for adam optimizer buffer_size = 50000 #size of the replay", "agent obs = env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs, rewards,", "from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def", "train_freq=1 #update the model every train_freq steps. set to None to disable printing", "or not. learning_starts=100 #how many steps of the model to collect transitions for", "collect transitions for before learning starts timesteps = 1000#2000 verbose = 1 env", "probability train_freq=1 #update the model every train_freq steps. 
set to None to disable", "adam optimizer buffer_size = 50000 #size of the replay buffer exploration_fraction=0.1 #fraction of", "#final value of random action probability exploration_initial_eps=1.0 #initial value of random action probability", "model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q,", "gym_sokoban from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN", "exploration rate is annealed exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0 #initial", "model to collect transitions for before learning starts timesteps = 1000#2000 verbose =", "double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset() print(model.action_probability(obs))", "#learning rate for adam optimizer buffer_size = 50000 #size of the replay buffer", "value of random action probability train_freq=1 #update the model every train_freq steps. 
set", "hyperparameters gamma = 0.99 #discount factor learning_rate = 0.00025 #learning rate for adam", "DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1)", "factor learning_rate = 0.00025 #learning rate for adam optimizer buffer_size = 50000 #size", "# Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while True: action, _states =", "model every train_freq steps. set to None to disable printing batch_size=32 #size of", "rate for adam optimizer buffer_size = 50000 #size of the replay buffer exploration_fraction=0.1", "value of random action probability exploration_initial_eps=1.0 #initial value of random action probability train_freq=1", "None to disable printing batch_size=32 #size of a batched sampled from replay buffer", "learning_rate = 0.00025 #learning rate for adam optimizer buffer_size = 50000 #size of", "which the exploration rate is annealed exploration_final_eps=0.02 #final value of random action probability", "env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps,", "= 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction,", "gym import gym_sokoban from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines", "gamma = 0.99 #discount factor learning_rate = 0.00025 #learning rate for adam optimizer", "0.99 #discount factor learning_rate = 0.00025 #learning 
rate for adam optimizer buffer_size =", "training double_q=True #whether to enable Double-Q learning or not. learning_starts=100 #how many steps", "= 50000 #size of the replay buffer exploration_fraction=0.1 #fraction of entire training period", "from stable_baselines import DQN def run(): # hyperparameters gamma = 0.99 #discount factor", "model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while True: action, _states", "action, _states = model.predict(obs) obs, rewards, done, info = env.step(action) env.render() if __name__", "#discount factor learning_rate = 0.00025 #learning rate for adam optimizer buffer_size = 50000", "replay buffer exploration_fraction=0.1 #fraction of entire training period over which the exploration rate", "action probability train_freq=1 #update the model every train_freq steps. set to None to", "verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while True:", "transitions for before learning starts timesteps = 1000#2000 verbose = 1 env =", "train_freq steps. set to None to disable printing batch_size=32 #size of a batched", "for training double_q=True #whether to enable Double-Q learning or not. 
learning_starts=100 #how many", "_states = model.predict(obs) obs, rewards, done, info = env.step(action) env.render() if __name__ ==", "batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset()", "to None to disable printing batch_size=32 #size of a batched sampled from replay", "for before learning starts timesteps = 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1')", "1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps,", "entire training period over which the exploration rate is annealed exploration_final_eps=0.02 #final value", "= model.predict(obs) obs, rewards, done, info = env.step(action) env.render() if __name__ == \"__main__\":", "learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs = env.reset() print(model.action_probability(obs)) while", "disable printing batch_size=32 #size of a batched sampled from replay buffer for training", "gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size,", "print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs, rewards, done, info = env.step(action)", "DQN def run(): # hyperparameters gamma = 0.99 #discount factor learning_rate = 0.00025", "of a batched sampled from replay buffer for training double_q=True #whether to enable", "MlpPolicy from stable_baselines import DQN def run(): # hyperparameters gamma = 0.99 
#discount", "= DQN(MlpPolicy, env, gamma=gamma, learning_rate=learning_rate, buffer_size=buffer_size, exploration_fraction=exploration_fraction, exploration_final_eps=exploration_final_eps, exploration_initial_eps=exploration_initial_eps, train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts,", "DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def run(): # hyperparameters", "training period over which the exploration rate is annealed exploration_final_eps=0.02 #final value of", "Double-Q learning or not. learning_starts=100 #how many steps of the model to collect", "import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from stable_baselines import DQN def run(): #", "to disable printing batch_size=32 #size of a batched sampled from replay buffer for", "train_freq=train_freq, batch_size=batch_size, double_q=double_q, learning_starts=learning_starts, verbose=1) model.learn(total_timesteps=timesteps) model.save(\"trained_models/dqn_sokoban_model\") # Enjoy trained agent obs =", "import gym import gym_sokoban from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines.deepq.policies import MlpPolicy from", "obs = env.reset() print(model.action_probability(obs)) while True: action, _states = model.predict(obs) obs, rewards, done,", "the model to collect transitions for before learning starts timesteps = 1000#2000 verbose", "#whether to enable Double-Q learning or not. learning_starts=100 #how many steps of the", "rate is annealed exploration_final_eps=0.02 #final value of random action probability exploration_initial_eps=1.0 #initial value", "starts timesteps = 1000#2000 verbose = 1 env = gym.make('Boxoban-Train-v1') model = DQN(MlpPolicy,", "steps. 
set to None to disable printing batch_size=32 #size of a batched sampled", "of entire training period over which the exploration rate is annealed exploration_final_eps=0.02 #final", "action probability exploration_initial_eps=1.0 #initial value of random action probability train_freq=1 #update the model", "batch_size=32 #size of a batched sampled from replay buffer for training double_q=True #whether" ]
[ "def SetArm(self, gesture, left): for p in range(0,len(gesture)): id = gesture[p][0] value =", "servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1);", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1)", "left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release()", "<EMAIL> # # Permission is hereby granted, free of charge, to any person", "\\__/ # # Project website: http://roobert.springwald.de # # ######## # # Arms #", "== False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True);", "self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" +", "False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft();", "left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus)", "SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import 
atexit", "reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17,", "#self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) +", "# a copy of this software and associated documentation files (the # \"Software\"),", "## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached();", "#tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left=", "EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "to # the following conditions: # # The above copyright notice and this", "left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached();", "(the # \"Software\"), to deal in the Software without restriction, including # without", "___/ __/ # _ _, _// /_/ / /_/ / /_/ / __/", "self._servoManager.GetCenteredValue(id+10) 
self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self,", "NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "#servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1)", "##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False);", "#tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached();", "self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5,", "\"Software\"), to deal in the Software without restriction, including # without limitation the", "centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1,", "clear = lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager = None;", "False); 
#tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ##", "ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached();", "/ / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de", "of this software and associated documentation files (the # \"Software\"), to deal in", "in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p in range(0,len(gesture)): id", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value", "# ######## # # Arms # # ######## # # Licensed under MIT", "False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]]", "_stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430):", "gesture, left): for p in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if", "from SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import", "|_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de # # ######## # #", "='/'.join(my_file.split('/')[0:-1]) 
sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array", "import * from array import array from SharedInts import SharedInts from SharedFloats import", "= [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager", "for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p in", "##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left=", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "| <EMAIL> # # Permission is hereby granted, free of charge, to any", "left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True);", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "furnished to do so, subject to # the following conditions: # # The", "os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from", "second version of home robot 
project # ________ ______ _____ # ___ __", "tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos =", "self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def", "left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False);", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def", "whom the Software is furnished to do so, subject to # the following", "id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value", "#tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached();", "left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus = -", "self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); 
self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15,", "self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545);", "# without limitation the rights to use, copy, modify, merge, publish, # distribute,", "for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100", "Arms # # ######## # # Licensed under MIT License (MIT) # #", "any person obtaining # a copy of this software and associated documentation files", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #", "masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460);", "_released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2", "self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release()", "MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in", "(c) 2018 <NAME> | 
<EMAIL> # # Permission is hereby granted, free of", "this permission notice shall be # included in all copies or substantial portions", "left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True);", "range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus", "atexit clear = lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager =", "Permission is hereby granted, free of charge, to any person obtaining # a", "sublicense, and/or sell copies of the Software, and to permit # persons to", "left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1,", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "gesture[p][1] if (left == True): id = id + 10; value = -(value", "- self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id,", "centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495);", 
"self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6,", "self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose", "WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if", "self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329);", "LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import atexit clear = lambda: os.system('cls'", "rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies", "self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459);", "tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus =", "SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager", "tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for 
i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached();", "leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose", "centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501);", "limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or", "SetArm(self, gesture, left): for p in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1]", "id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id", "without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense,", "#self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1,", "self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def", "left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging,", 
"# ___ __ \\______________ /_______________ /_ # __ /_/ / __ \\ __", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "of the Software, and to permit # persons to whom the Software is", "self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id,", "array import array from SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos", "from SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos from", "reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for", "modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and", "tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus = - plus", "/ __ \\ __ \\_ __ \\ _ \\_ ___/ __/ # _", "left): for p in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if (left", "in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if (left == True): id", "\":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self):", "left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else:", 
"# left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13,", "+ \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import array", "ended = False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester =", "import SmartServoManager import atexit clear = lambda: os.system('cls' if os.name=='nt' else 'clear') class", "KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True);", "PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self):", "copies of the Software, and to permit # persons to whom the Software", "== True): id = id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) +", "# \"Software\"), to deal in the Software without restriction, including # without limitation", "# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in", "left=True); 
tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus", "import SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import", "for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for", "#tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left=", "permit # persons to whom the Software is furnished to do so, subject", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager = None; _released = False;", "#time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True,", "/_/ / /_/ / __/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/", "in the Software without restriction, including # without limitation the rights to use,", "else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released", "#tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); 
tester.SetArm(gesture=Arms._lookAtHand,", "from __future__ import division import time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1])", "left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False);", "import division import time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path +", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT", "self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture,", "__ \\______________ /_______________ /_ # __ /_/ / __ \\ __ \\_ __", "this software and associated documentation files (the # \"Software\"), to deal in the", "project # ________ ______ _____ # ___ __ \\______________ /_______________ /_ # __", "MultiProcessing import * from array import array from SharedInts import SharedInts from SharedFloats", "# right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3,", "if __name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager =", "= [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues =", "centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); 
self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14,", "SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True);", "isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id)", "self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True);", "opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen);", "of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose", "_wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480,", "while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if (opened==True):", "= LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues();", "in all copies or substantial portions of the Software. # # THE SOFTWARE", "in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "* from array import array from SharedInts import SharedInts from SharedFloats import SharedFloats", "reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm", "lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager = None; _released =", "range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); 
tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached();", "if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released =", "#tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False,", "self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\":", "= None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 =", "exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos", "MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value))", "def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True):", "left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand,", "shall be # included in all copies or substantial portions of the Software.", "isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);", "person obtaining # a copy of this 
software and associated documentation files (the", "True): id = id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id)", "= os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT", "tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True);", "(MIT) # # Copyright (c) 2018 <NAME> | <EMAIL> # # Permission is", "for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for", "ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
from __future__", "range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p in range(0,len(gesture)): id =", "arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352);", "self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11,", "<NAME> | <EMAIL> # # Permission is hereby granted, free of charge, to", "CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH", "# Licensed under MIT License (MIT) # # Copyright (c) 2018 <NAME> |", "#tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2,", "\\ __ \\_ __ \\ _ \\_ ___/ __/ # _ _, _//", "plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left=", "def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler)", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, 
INCLUDING", "is furnished to do so, subject to # the following conditions: # #", "/_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de # # ######## #", "SmartServoManager import atexit clear = lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms():", "the following conditions: # # The above copyright notice and this permission notice", "left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2,", "#tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached();", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE.", "= -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value);", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM,", "_ _, _// /_/ / /_/ / /_/ / __/ / / /_", "servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True);", "________ ______ _____ # ___ __ \\______________ /_______________ /_ # __ /_/ /", "LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); 
#tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start();", "left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10)", "def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for", "tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus =", "#print (str(id) + \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10,", "for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached();", "USE OR OTHER # DEALINGS IN THE SOFTWARE. 
from __future__ import division import", "tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False);", "rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen =", "notice shall be # included in all copies or substantial portions of the", "(\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "# print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i", "#tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging,", "[[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager,", "self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start()", "True); #tester.SetHand(opened=False, left= 
False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached();", "# /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de # # ########", "permission notice shall be # included in all copies or substantial portions of", "and this permission notice shall be # included in all copies or substantial", "copy of this software and associated documentation files (the # \"Software\"), to deal", "tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in", "\"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import array from SharedInts import SharedInts", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS", "= SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues,", "self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached();", "\"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import array from", "from array import array from SharedInts import SharedInts from SharedFloats import SharedFloats from", "centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);", "value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id))", "= rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True,", "in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value = -(value", "left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1,", "hereby granted, free of charge, to any 
person obtaining # a copy of", "self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16,", "tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging,", "else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1);", "rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen", "centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13,", "/_/ / /_/ / /_/ / __/ / / /_ # /_/ |_|", "the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell", "OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR", "#tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached();", "else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released ==", "[[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager =", "# persons to whom the Software is furnished to do so, subject to", "#print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self,", "from SmartServoManager import SmartServoManager import atexit clear = lambda: os.system('cls' if os.name=='nt' else", "tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus)", "= [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580,", "granted, free of charge, to any person obtaining # a copy of this", "######## # # Licensed under MIT License (MIT) # # Copyright (c) 2018", "== False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else:", "(left==True): if (opened==True): 
self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def", "centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11,", "+ self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def", "SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608);", "time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else:", "tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True);", "#tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left=", "# Copyright (c) 2018 <NAME> | <EMAIL> # # Permission is hereby granted,", "+ 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + 
self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" +", "# # Project website: http://roobert.springwald.de # # ######## # # Arms # #", "servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos = LX16AServos()", "left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached();", "#tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True);", "# ######## # # Licensed under MIT License (MIT) # # Copyright (c)", "(opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if", "self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def", "= leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging,", "'clear') class Arms(): _servoManager = None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand", "Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "#time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached();", "_servoManager = None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1", "from MultiProcessing import * from array import array from SharedInts import SharedInts from", "obtaining # a copy of this software and associated documentation files (the #", "to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of", "= 100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached ==", "atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester", "masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left", "= Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print()", "-(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print", 
"#tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True);", "smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose =", "/ /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de #", "self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id,", "associated documentation files (the # \"Software\"), to deal in the Software without restriction,", "time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False);", "in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value =", "def Release(self): if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True);", "LX16AServos from SmartServoManager import SmartServoManager import atexit clear = lambda: os.system('cls' if os.name=='nt'", "my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + 
\"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing", "# Arms # # ######## # # Licensed under MIT License (MIT) #", "rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self):", "#print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE", "self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in", "tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached();", "Arms(): _servoManager = None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]]", "reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7,", "id = id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value);", 
"\\_ __ \\ _ \\_ ___/ __/ # _ _, _// /_/ /", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import array from SharedInts", "leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False);", "_ \\_ ___/ __/ # _ _, _// /_/ / /_/ / /_/", "\\_ ___/ __/ # _ _, _// /_/ / /_/ / /_/ /", "range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8):", "http://roobert.springwald.de # # ######## # # Arms # # ######## # # Licensed", "[[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]]", "self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release()", "# ________ ______ _____ # ___ __ \\______________ /_______________ /_ # __ /_/", "right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1,", "left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); 
#tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached();", "Software, and to permit # persons to whom the Software is furnished to", "+ str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached ==", "\\ _ \\_ ___/ __/ # _ _, _// /_/ / /_/ /", "= lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager = None; _released", "under MIT License (MIT) # # Copyright (c) 2018 <NAME> | <EMAIL> #", "[[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]]", "if os.name=='nt' else 'clear') class Arms(): _servoManager = None; _released = False; _armHanging", "range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18):", "if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose);", "#tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left=", "= [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, 
rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen", "def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id", "#tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\")", "# # Arms # # ######## # # Licensed under MIT License (MIT)", "sell copies of the Software, and to permit # persons to whom the", "THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from", "left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1,", "self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False);", "_// /_/ / /_/ / /_/ / __/ / / /_ # /_/", "# Project website: http://roobert.springwald.de # # ######## # # Arms # # ########", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #", "= False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 =", "including # without limitation the rights to use, copy, modify, merge, publish, #", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS", "# _ _, _// /_/ / /_/ / /_/ / __/ / /", "True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## 
time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True);", "left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._ghettoFist2, left= True);", "/ __/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project", "False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose)", "for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]:", "- plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False,", "sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import", "TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE", "array from SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos", "self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id,", "id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value =", "#while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); 
#tester.WaitTillTargetsReached(); #time.sleep(1);", "+ \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from array import array from SharedInts import", "SmartServoManager import SmartServoManager import atexit clear = lambda: os.system('cls' if os.name=='nt' else 'clear')", "value = gesture[p][1] if (left == True): id = id + 10; value", "tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): #", "def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\"", "(left == True): id = id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10))", "__future__ import division import time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path", "OTHER # DEALINGS IN THE SOFTWARE. 
from __future__ import division import time, sys,", "and associated documentation files (the # \"Software\"), to deal in the Software without", "2018 <NAME> | <EMAIL> # # Permission is hereby granted, free of charge,", "my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import * from", "= smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose =", "IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in", "import atexit clear = lambda: os.system('cls' if os.name=='nt' else 'clear') class Arms(): _servoManager", "if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False);", "+ self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id))", "left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True):", "self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues()", "so, subject to # the following conditions: # # The above copyright notice", "range(1,8): 
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id);", "__name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos,", "(opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released = True;", "#self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2,", "##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True);", "Licensed under MIT License (MIT) # # Copyright (c) 2018 <NAME> | <EMAIL>", "self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def", "persons to whom the Software is furnished to do so, subject to #", "do so, subject to # the following conditions: # # The above copyright", "in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True);", "masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, 
centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self):", "######## # # Arms # # ######## # # Licensed under MIT License", "#time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide,", "#self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1,", "while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def", "#_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager", "Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True):", "= self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) +", "leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen", "use, copy, modify, merge, publish, # distribute, 
sublicense, and/or sell copies of the", "the Software without restriction, including # without limitation the rights to use, copy,", "[[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540,", "self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for id in [1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print", "tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for", "tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached();", "PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self):", "+ \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def", "range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if (left == True): id =", "p in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if (left == True):", "def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); 
self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "Copyright (c) 2018 <NAME> | <EMAIL> # # Permission is hereby granted, free", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "files (the # \"Software\"), to deal in the Software without restriction, including #", "OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import division import time,", "\\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de # # ######## # # Arms", "division import time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\")", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path +", "# DEALINGS IN THE SOFTWARE. 
from __future__ import division import time, sys, os", "tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while", "__ \\ _ \\_ ___/ __/ # _ _, _// /_/ / /_/", "self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released", "- second version of home robot project # ________ ______ _____ # ___", "Release(self): if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True,", "charge, to any person obtaining # a copy of this software and associated", "self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self):", "#tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1,", "to deal in the Software without restriction, including # without limitation the rights", "False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached();", "SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import 
SmartServoManager", "self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start()", "100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False):", "to permit # persons to whom the Software is furnished to do so,", "#tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._ghettoFist2,", "def SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if", "== False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left=", "__/ # _ _, _// /_/ / /_/ / /_/ / __/ /", "# # ######## # # Licensed under MIT License (MIT) # # Copyright", "EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "IN THE SOFTWARE. 
from __future__ import division import time, sys, os my_file =", "centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8):", "#!/usr/bin/env python # Roobert V2 - second version of home robot project #", "left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm", "id = gesture[p][0] value = gesture[p][1] if (left == True): id = id", "gesture[p][0] value = gesture[p][1] if (left == True): id = id + 10;", "value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else:", "SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True):", "= -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in", "self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False):", "___ __ \\______________ /_______________ /_ # __ /_/ / __ \\ __ \\_", "class Arms(): _servoManager = None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand =", "documentation files (the # \"Software\"), to deal in the Software without restriction, including", "and to permit # persons to whom the Software is furnished to do", "V2 - second version of home 
robot project # ________ ______ _____ #", "os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\") from MultiProcessing import *", "masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486);", "centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477);", "= True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self):", "_, _// /_/ / /_/ / /_/ / __/ / / /_ #", "str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False):", "MIT License (MIT) # # Copyright (c) 2018 <NAME> | <EMAIL> # #", "robot project # ________ ______ _____ # ___ __ \\______________ /_______________ /_ #", "self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended =", "left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); 
tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1,", "# # Copyright (c) 2018 <NAME> | <EMAIL> # # Permission is hereby", "(\"right:\" + str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened,", "_wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def", "centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); # left arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12,", "else 'clear') class Arms(): _servoManager = None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]]", "+str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for", "tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2,", "distribute, sublicense, and/or sell copies of the Software, and to permit # persons", "str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left): if", "ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "/_/ / __ \\ __ \\_ __ \\ _ \\_ ___/ __/ #", "website: http://roobert.springwald.de # # ######## # # Arms # # ######## # #", "#while(True): #plus = - 
plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False,", "home robot project # ________ ______ _____ # ___ __ \\______________ /_______________ /_", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT", "(tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True,", "of charge, to any person obtaining # a copy of this software and", "value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id", "notice and this permission notice shall be # included in all copies or", "left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False);", "= False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager)", "# # The above copyright notice and this permission notice shall be #", "None; _released = False; _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]]", "merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to", "sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/hardware\")", "\"__main__\": 
atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1)", "# distribute, sublicense, and/or sell copies of the Software, and to permit #", "Software is furnished to do so, subject to # the following conditions: #", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__", "the Software is furnished to do so, subject to # the following conditions:", "DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4,", "the Software, and to permit # persons to whom the Software is furnished", "False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False);", "self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start()", "# Permission is hereby granted, free of charge, to any person obtaining #", "_lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues", "time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): 
self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); #self._servoManager.Start() def MirrorRightArmToLeftUpdate(self): for", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "(str(id) + \":\" +str(value)) value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value);", "__del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF", "# included in all copies or substantial portions of the Software. # #", "if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen) else: self._servoManager.MoveServo(18,self._leftHandClose) else: if (opened==True): self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self):", "import array from SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos import", "#plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left=", "copyright notice and this permission notice shall be # included in all copies", "a copy of this software and associated documentation files (the # \"Software\"), to", "print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1,", "while(True): self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def 
PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True):", "## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached(); ##tester.SetArm(gesture=Arms._lookHand, left=False); ##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide,", "id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p in range(0,len(gesture)):", "self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8,", "left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ ==", "self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p in range(0,len(gesture)): id = gesture[p][0]", "# # Licensed under MIT License (MIT) # # Copyright (c) 2018 <NAME>", "#while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True,", 
"#tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._ghettoFist2, left=", "def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685);", "copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software,", "_armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide", "import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import atexit clear", "id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id", "+ str(id)) def WaitTillTargetsReached(self): while (self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left):", "tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True);", "#tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1);", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN", 
"if (left == True): id = id + 10; value = -(value -", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False);", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR", "# The above copyright notice and this permission notice shall be # included", "centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id", "OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import", "SharedFloats from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import atexit clear =", "included in all copies or substantial portions of the Software. 
# # THE", "left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler():", "python # Roobert V2 - second version of home robot project # ________", "/ /_/ / __/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ #", "False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True); #tester.SetHand(opened=False, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True);", "left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2,", "##tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.SetArm(gesture=Arms._strechSide, left=False); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached();", "#time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ##", "WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED", "= leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False);", "version of home robot project # ________ ______ _____ # ___ __ \\______________", "PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS", "isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False) time.sleep(0.1) def MirrorRightArmToLeftStart(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);", "self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500); #", "License (MIT) # # Copyright (c) 2018 <NAME> | <EMAIL> # # Permission", "be # included in all copies or substantial portions of the Software. #", "WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "to do so, subject to # the following conditions: # # The above", "__ \\_ __ \\ _ \\_ ___/ __/ # _ _, _// /_/", "i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand,", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen", "# __ /_/ / __ \\ __ \\_ __ \\ _ \\_ ___/", "SOFTWARE. 
from __future__ import division import time, sys, os my_file = os.path.abspath(__file__) my_path", "restriction, including # without limitation the rights to use, copy, modify, merge, publish,", "#self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right", "______ _____ # ___ __ \\______________ /_______________ /_ # __ /_/ / __", "OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE", "arm self._servoManager.AddMasterServo(servoId=11, centeredValue=545); self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459); self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700);", "without restriction, including # without limitation the rights to use, copy, modify, merge,", "True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1,", "(self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True,", "# # ######## # # Arms # # ######## # # Licensed under", "tester.SetArm(gesture=Arms._wink1, left=False); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4):", "# # Permission is hereby granted, free of 
charge, to any person obtaining", "all copies or substantial portions of the Software. # # THE SOFTWARE IS", "# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED,", "deal in the Software without restriction, including # without limitation the rights to", "left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370);", "while(True): tester.SetArm(gesture=Arms._armHanging, left=False); tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=False); tester.WaitTillTargetsReached(); for i in range(1,4):", "__/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website:", "#plus = 100 #servoManager.Start() #while(True): #plus = - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS #", "import time, sys, os my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + \"/../DanielsRasPiPythonLibs/multitasking\") sys.path.insert(0,my_path", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "# Roobert V2 - second version of home robot project # ________ ______", "= [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]] #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self,", "#tester.SetArm(gesture=Arms._wink1, left=True); 
#tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True);", "servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "to any person obtaining # a copy of this software and associated documentation", "is hereby granted, free of charge, to any person obtaining # a copy", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "os.name=='nt' else 'clear') class Arms(): _servoManager = None; _released = False; _armHanging =", "self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging,", "- self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\"", "True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._ghettoFist2, left= True); #tester.WaitTillTargetsReached();", "left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide,", "Roobert V2 - second version of home robot project # ________ ______ _____", "Software without restriction, including # without limitation the rights to use, copy, modify,", "above copyright notice and this permission notice shall be # included in all", "/_ # __ /_/ / __ \\ __ \\_ __ 
\\ _ \\_", "following conditions: # # The above copyright notice and this permission notice shall", "isReadOnly=False); def SetArm(self, gesture, left): for p in range(0,len(gesture)): id = gesture[p][0] value", "#tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1) ## print(\"sleep\") #tester.SetArm(gesture=Arms._strechSide, left=True); #tester.WaitTillTargetsReached();", "#tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True); #tester.SetArm(gesture=Arms._ghettoFist1, left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._ghettoFist2, left= True); #tester.WaitTillTargetsReached(); print(\"done\");", "self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id)); else: self._servoManager.MoveServo(id,value); #print (\"right:\" + str(id)) def WaitTillTargetsReached(self):", "MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left): for p", "free of charge, to any person obtaining # a copy of this software", "The above copyright notice and this permission notice shall be # included in", "subject to # the following conditions: # # The above copyright notice and", "self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def", "#tester.WaitTillTargetsReached(); #time.sleep(1); #tester.SetHand(opened=True, left= True); #tester.SetHand(opened=True, left= False); #tester.WaitTillTargetsReached(); #time.sleep(1); ##while(True): ## time.sleep(1)", "# the following conditions: # # The above copyright notice and this permission", "IMPLIED, INCLUDING BUT NOT 
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def DefineArms(self): #", "id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\"", "Project website: http://roobert.springwald.de # # ######## # # Arms # # ######## #", "THE SOFTWARE. from __future__ import division import time, sys, os my_file = os.path.abspath(__file__)", "== \"__main__\": atexit.register(exit_handler) ended = False; servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0,", "centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503);", "THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
from __future__ import division", "_____ # ___ __ \\______________ /_______________ /_ # __ /_/ / __ \\", "and/or sell copies of the Software, and to permit # persons to whom", "-(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10) self._servoManager.MoveServo(id+10, pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8):", "#tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True): # print() while(True): tester.SetArm(gesture=Arms._armHanging, left=False);", "= [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]] _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]] _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]] _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]] _stretchSide =", "centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3, centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510);", "__ \\ __ \\_ __ \\ _ \\_ ___/ __/ # _ _,", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "[1,3,5,6,7,8]: value = self._servoManager.ReadServo(id); #print (str(id) + \":\" +str(value)) value = -(value -", "import LX16AServos from SmartServoManager import SmartServoManager import atexit clear = lambda: os.system('cls' if", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release() def exit_handler(): tester.Release() servoManager.Release() servos.Release() if", "centeredValue=685); self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, 
centeredValue=352); self._servoManager.AddMasterServo(servoId=5, centeredValue=510); self._servoManager.AddMasterServo(servoId=6, centeredValue=460); self._servoManager.AddMasterServo(servoId=7, centeredValue=495); self._servoManager.AddMasterServo(servoId=8, centeredValue=500);", "self._leftHandOpen = leftHandOpen self._leftHandClose = leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms()", "def exit_handler(): tester.Release() servoManager.Release() servos.Release() if __name__ == \"__main__\": atexit.register(exit_handler) ended = False;", "DEALINGS IN THE SOFTWARE. from __future__ import division import time, sys, os my_file", "= - plus ##tester._servoManager.MoveServo(1,400+plus) #tester._servoManager.MoveServo(3,600+plus) #while (tester._servoManager.allTargetsReached == False): #time.sleep(0.1) #tester.SetHand(opened=False, left= True);", "/_/ / __/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # #", "software and associated documentation files (the # \"Software\"), to deal in the Software", "= gesture[p][1] if (left == True): id = id + 10; value =", "tester.SetArm(gesture=Arms._armHanging, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._lookAtHand, left=True); tester.WaitTillTargetsReached(); for i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached();", "\\______________ /_______________ /_ # __ /_/ / __ \\ __ \\_ __ \\", "= rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging, left=True); #self.SetHand(opened=True, left=True); #self.WaitTillTargetsReached(); def", "self._servoManager.AddMasterServo(servoId=13, centeredValue=329); self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700); self._servoManager.AddMasterServo(servoId=15, centeredValue=477); 
self._servoManager.AddMasterServo(servoId=16, centeredValue=486); self._servoManager.AddMasterServo(servoId=17, centeredValue=501); self._servoManager.AddMasterServo(servoId=18,", "publish, # distribute, sublicense, and/or sell copies of the Software, and to permit", "10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print (\"left:\" + str(id));", "[[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]] def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen =", "to whom the Software is furnished to do so, subject to # the", "#self.WaitTillTargetsReached(); def DefineArms(self): # right arm self._servoManager.AddMasterServo(servoId=1, centeredValue=370); self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608); self._servoManager.AddMasterServo(servoId=3,", "True; self.SetArm(gesture=Arms._armHanging, left=False); self.SetArm(gesture=Arms._armHanging, left=True); self.SetHand(opened=True, left=False); self.SetHand(opened=True, left=True); self.WaitTillTargetsReached(); def __del__(self): self.Release()", "leftHandClose self._rightHandOpen = rightHandOpen self._rightHandClose = rightHandClose self.DefineArms() #self.SetArm(gesture=Arms._armHanging, left=False); #self.SetHand(opened=True, left=False); #self.SetArm(gesture=Arms._armHanging,", "self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1) def PrintLeftArmValues(self): for id in range(11,18): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False)", "centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True): 
self._servoManager.PrintReadOnlyServoValues() time.sleep(0.1)", "of home robot project # ________ ______ _____ # ___ __ \\______________ /_______________", "AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE", "__ /_/ / __ \\ __ \\_ __ \\ _ \\_ ___/ __/", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "for p in range(0,len(gesture)): id = gesture[p][0] value = gesture[p][1] if (left ==", "/_______________ /_ # __ /_/ / __ \\ __ \\_ __ \\ _", "pos=value); def MirrorRightArmToLeftEnd(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False); def SetArm(self, gesture, left):", "left= True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink1, left=True); #tester.WaitTillTargetsReached(); #tester.SetArm(gesture=Arms._wink2, left= True); #tester.WaitTillTargetsReached(); #tester.SetHand(opened=False, left= True);", "__init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430): self._servoManager = smartServoManager self._leftHandOpen = leftHandOpen self._leftHandClose", "(self._servoManager.allTargetsReached == False): time.sleep(0.1); def SetHand(self, opened, left): if (left==True): if (opened==True): self._servoManager.MoveServo(18,self._leftHandOpen)", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL #", "= gesture[p][0] value = gesture[p][1] if (left == True): id = id +", "in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start() #while(True):", "servos = LX16AServos() servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues()", "self._servoManager.MoveServo(8,self._rightHandOpen); else: self._servoManager.MoveServo(8,self._rightHandClose); def Release(self): if (self._released == False): self._released = True; self.SetArm(gesture=Arms._armHanging,", "i in range(1,4): tester.SetArm(gesture=Arms._wink2, left=True); tester.WaitTillTargetsReached(); tester.SetArm(gesture=Arms._wink1, left=True); tester.WaitTillTargetsReached(); #plus = 100 #servoManager.Start()", "/ /_/ / /_/ / __/ / / /_ # /_/ |_| \\____/\\____//_.___/\\___//_/", "from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager import atexit clear = lambda:", "maxSpeed=1) tester = Arms(servoManager) #tester.MirrorRightArmToLeft(); #tester.PrintRightArmValues() tester.PrintLeftArmValues(); servoManager.Start(); #time.sleep(1); #tester.SetArm(gesture=Arms._rightCenteredValues, left=True); #tester.WaitTillTargetsReached(); #while(True):", "NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "centeredValue=501); self._servoManager.AddMasterServo(servoId=18, centeredValue=503); def PrintRightArmValues(self): for id in range(1,8): self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True); self._servoManager.Start() while(True):", "= id + 10; value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id) self._servoManager.MoveServo(id,value); #print", "/_ # /_/ |_| \\____/\\____//_.___/\\___//_/ \\__/ # # Project website: http://roobert.springwald.de # #" ]
[ "output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv with open(poll_path, newline=\"\")", "votes each candidate won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4)", "candidate_votes = [] # Winning Candidate and Winning Count Tracker percent = []", "csv with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the", "= total_votes + 1 #read in the candidate name from column 3 row", "max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\")", "file using \"write\" mode. Specify the variable to hold the contents with open(output_path,", "of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}%", "the candidate name from column 3 row 2 of csv candidate_in = (row[2])", "1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index:", "of csv candidate_in = (row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index]", "in poll_reader: total_votes = total_votes + 1 #read in the candidate name from", "+ 1 else: #if candidate was not found in candidates_unique list then append", "export a text file with the results # Open the file using \"write\"", "to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast :", "if candidate_in in 
candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else:", "Read the header row first (skip this part if there is no header)", "{election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file with the results # Open the", "#print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes", "2 of csv candidate_in = (row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in)", "results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast", "for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open", "{total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes", "csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The", "in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The Winner is : {election_winner}\\n\") textfile.write(\"--------------------------------------------------------------\\n\")", "import csv #Set the variables total_votes = 0 total_candidates = 0 candidates_names =", "[] # Winning Candidate and Winning Count Tracker 
percent = [] # Set", "to list and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}')", "number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} :", "header) poll_header = next(csvfile) #To loop through the data to collect the answers", "0 total_candidates = 0 candidates_names = [] candidate_votes = [] # Winning Candidate", "= round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index]", "Open the file using \"write\" mode. Specify the variable to hold the contents", "answers for row in poll_reader: total_votes = total_votes + 1 #read in the", "total_votes = total_votes + 1 #read in the candidate name from column 3", "and Winning Count Tracker percent = [] # Set path for file poll_path", "list then append to list and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1)", "Analysis\") # Open and read csv with open(poll_path, newline=\"\") as csvfile: poll_reader =", "csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number", "candidates_unique list then append to list and add 1 to vote count candidates_names.append(candidate_in)", "delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of", "print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file with the", "is no header) poll_header = next(csvfile) #To loop 
through the data to collect", "for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes)", "the variable to hold the contents with open(output_path, 'w', newline='') as textfile: #", "print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\")", "x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The Winner is : {election_winner}\\n\")", "append to list and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes", "through the data to collect the answers for row in poll_reader: total_votes =", "{total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner", "of votes each candidate won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100,", "a text file with the results # Open the file using \"write\" mode.", "candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate was not", "was not found in candidates_unique list then append to list and add 1", "1 #read in the candidate name from column 3 row 2 of csv", "Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file 
with the results", "newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header row first", "data to collect the answers for row in poll_reader: total_votes = total_votes +", "percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to terminal", "variable to hold the contents with open(output_path, 'w', newline='') as textfile: # Initialize", "csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\")", "= next(csvfile) #To loop through the data to collect the answers for row", "# Winning Candidate and Winning Count Tracker percent = [] # Set path", "#The percentage of votes each candidate won for x in range(len(candidates_names)): vote_percent =", "for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The Winner is :", "to collect the answers for row in poll_reader: total_votes = total_votes + 1", "candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The", "number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} :", "[] # Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\",", "first (skip this part if there is no header) poll_header = next(csvfile) 
#To", "election_winner = candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total", "mode. Specify the variable to hold the contents with open(output_path, 'w', newline='') as", "terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast : {total_votes}\")", "csv.reader(csvfile, delimiter=\",\") # Read the header row first (skip this part if there", "next(csvfile) #To loop through the data to collect the answers for row in", "#if candidate was not found in candidates_unique list then append to list and", "candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each candidate", "candidate won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes", "candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of", "no header) poll_header = next(csvfile) #To loop through the data to collect the", ": {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The", "total_votes + 1 #read in the candidate name from column 3 row 2", "poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = 
os.path.join(\"Resources\", \"Election Analysis\") # Open and read", "total_votes = 0 total_candidates = 0 candidates_names = [] candidate_votes = [] #", "4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to", "print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in", "is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file with the results #", "to hold the contents with open(output_path, 'w', newline='') as textfile: # Initialize csv.writer", "print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for", "hold the contents with open(output_path, 'w', newline='') as textfile: # Initialize csv.writer csvwriter", "in the candidate name from column 3 row 2 of csv candidate_in =", "print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file with", "= os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv", "# Read the header row first (skip this part if there is no", "row first (skip this part if there is no header) poll_header = next(csvfile)", "(skip this part if there is no header) poll_header = next(csvfile) #To loop", "#read in the candidate name from column 3 row 2 of csv candidate_in", "vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) 
max_index= candidate_votes.index(max_votes) election_winner =", "= candidate_votes[candidate_index] + 1 else: #if candidate was not found in candidates_unique list", "[] candidate_votes = [] # Winning Candidate and Winning Count Tracker percent =", "round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print", "as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header row first (skip", "os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv with open(poll_path, newline=\"\") as csvfile:", "and read csv with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") #", "= 0 total_candidates = 0 candidates_names = [] candidate_votes = [] # Winning", "#print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes", "# Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election", "= (row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] +", "1 else: #if candidate was not found in candidates_unique list then append to", "textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for", "+ 1 #read in the candidate name from column 3 row 2 of", "name from column 3 row 2 of csv candidate_in = (row[2]) if candidate_in", "= candidates_names.index(candidate_in) 
candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate was not found", "this part if there is no header) poll_header = next(csvfile) #To loop through", "part if there is no header) poll_header = next(csvfile) #To loop through the", "text file with the results # Open the file using \"write\" mode. Specify", "= os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv with open(poll_path, newline=\"\") as", "Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x", "max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\")", "votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\")", "the variables total_votes = 0 total_candidates = 0 candidates_names = [] candidate_votes =", "Tracker percent = [] # Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\")", "the answers for row in poll_reader: total_votes = total_votes + 1 #read in", "textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in", "(row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1", "= max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = 
candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election", "#print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each candidate won for x in", "{total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The Winner", "if there is no header) poll_header = next(csvfile) #To loop through the data", "= [] candidate_votes = [] # Winning Candidate and Winning Count Tracker percent", "textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text file", "in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\")", "os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv with", "Count Tracker percent = [] # Set path for file poll_path = os.path.join(\"Resources\",", "from column 3 row 2 of csv candidate_in = (row[2]) if candidate_in in", "collect the answers for row in poll_reader: total_votes = total_votes + 1 #read", "for row in poll_reader: total_votes = total_votes + 1 #read in the candidate", "of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}%", "= 0 candidates_names = [] candidate_votes = [] # Winning Candidate and Winning", "candidate name from column 3 row 2 of csv candidate_in = (row[2]) 
if", "print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is", "total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]}", "csv candidate_in = (row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] =", "0 candidates_names = [] candidate_votes = [] # Winning Candidate and Winning Count", "using \"write\" mode. Specify the variable to hold the contents with open(output_path, 'w',", "as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text", "row in poll_reader: total_votes = total_votes + 1 #read in the candidate name", "candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate was not found in", "candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The", "{percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text", "= candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number", "3 row 2 of csv candidate_in = (row[2]) if candidate_in in candidates_names: 
candidate_index", "#print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage", "with open(output_path, 'w', newline='') as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',')", "and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate:", "# Open and read csv with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile,", "= csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total", "#print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each candidate won for", ": {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file with the results # Open", "to terminal print(\"------------------------------------------------------------\") print(\"Election Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast :", "add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}')", "open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header row", "open(output_path, 'w', newline='') as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print", "Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\")", "x in range(len(candidates_names)): print(f\"{candidates_names[x]} : 
{percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\")", "print(f\"The total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)):", "variables total_votes = 0 total_candidates = 0 candidates_names = [] candidate_votes = []", "textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The Winner is", "= [] # Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path =", "candidate_votes[candidate_index] + 1 else: #if candidate was not found in candidates_unique list then", "{candidate_votes}\") #The percentage of votes each candidate won for x in range(len(candidates_names)): vote_percent", "path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") #", "for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is :", "# Open the file using \"write\" mode. 
Specify the variable to hold the", "file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\")", "Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election Results\\n\")", "vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates", "in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate", "there is no header) poll_header = next(csvfile) #To loop through the data to", "candidate_in = (row[2]) if candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index]", "list and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each", "file with the results # Open the file using \"write\" mode. Specify the", "Specify the variable to hold the contents with open(output_path, 'w', newline='') as textfile:", "votes: {candidate_votes}\") #The percentage of votes each candidate won for x in range(len(candidates_names)):", "({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a text file", "\"Election Analysis\") # Open and read csv with open(poll_path, newline=\"\") as csvfile: poll_reader", "\"write\" mode. 
Specify the variable to hold the contents with open(output_path, 'w', newline='')", "column 3 row 2 of csv candidate_in = (row[2]) if candidate_in in candidates_names:", "os import csv #Set the variables total_votes = 0 total_candidates = 0 candidates_names", "import os import csv #Set the variables total_votes = 0 total_candidates = 0", "candidates_names = [] candidate_votes = [] # Winning Candidate and Winning Count Tracker", "loop through the data to collect the answers for row in poll_reader: total_votes", "print(\"--------------------------------------------------------------\") #To export a text file with the results # Open the file", "with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header", "Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast : {total_votes}\\n\")", "total_candidates = 0 candidates_names = [] candidate_votes = [] # Winning Candidate and", "x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index=", "#To export a text file with the results # Open the file using", "the data to collect the answers for row in poll_reader: total_votes = total_votes", "cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x in range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\")", "= [] # Winning Candidate and Winning Count Tracker percent = [] #", "not found in candidates_unique list then append to list and add 1 to", "delimiter=\",\") # Read the header row first (skip this part if there is", "Open and read csv with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, 
delimiter=\",\")", "# Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to Text file textfile.write(\"Election", "to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}')", "range(len(candidates_names)): print(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To", "{candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each candidate won", "#To loop through the data to collect the answers for row in poll_reader:", "Winning Candidate and Winning Count Tracker percent = [] # Set path for", "\"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open and read csv with open(poll_path,", "results # Open the file using \"write\" mode. 
Specify the variable to hold", "Candidate and Winning Count Tracker percent = [] # Set path for file", "file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path = os.path.join(\"Resources\", \"Election Analysis\") # Open and", "candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate was not found in candidates_unique", "*100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results", ": {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\") textfile.write(f\"The", "candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if candidate was", "Modules import os import csv #Set the variables total_votes = 0 total_candidates =", "count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes:", "candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total votes {total_votes}') #print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\")", "contents with open(output_path, 'w', newline='') as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile,", "<gh_stars>0 # Modules import os import csv #Set the variables total_votes = 0", "csv #Set the variables total_votes = 0 total_candidates = 0 candidates_names = []", "print(f\"{candidates_names[x]} : {percent[x]}% 
({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export", "percentage of votes each candidate won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes", "in candidates_unique list then append to list and add 1 to vote count", "the contents with open(output_path, 'w', newline='') as textfile: # Initialize csv.writer csvwriter =", "newline='') as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results to", "the results # Open the file using \"write\" mode. Specify the variable to", "else: #if candidate was not found in candidates_unique list then append to list", "max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner = candidates_names[max_index] #print results to terminal print(\"------------------------------------------------------------\")", "#Set the variables total_votes = 0 total_candidates = 0 candidates_names = [] candidate_votes", "poll_reader: total_votes = total_votes + 1 #read in the candidate name from column", "Results\") print(\"-------------------------------------------------------------\") print(f\"The total number of votes cast : {total_votes}\") print(\"--------------------------------------------------------------\") for x", "{candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each candidate won for x", "read csv with open(poll_path, newline=\"\") as csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read", "'w', newline='') as textfile: # Initialize csv.writer csvwriter = csv.writer(csvfile, delimiter=',') #print results", "csvfile: poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header row first (skip this", "votes {total_votes}') #print(f'Each candidate: 
{candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of", "range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes) election_winner", "then append to list and add 1 to vote count candidates_names.append(candidate_in) candidate_votes.append(1) #print(f'Total", ": {percent[x]}% ({candidate_votes[x]})\") print(\"------------------------------------------------------------------\") print(f\"The Winner is : {election_winner}\") print(\"--------------------------------------------------------------\") #To export a", "Winning Count Tracker percent = [] # Set path for file poll_path =", "poll_header = next(csvfile) #To loop through the data to collect the answers for", "#print(f'Each candidate: {candidates_names}') #print(f'Index: {candidates_names.index(candidate_in)}') #print(f\"candidates votes: {candidate_votes}\") #The percentage of votes each", "poll_reader = csv.reader(csvfile, delimiter=\",\") # Read the header row first (skip this part", "results to Text file textfile.write(\"Election Results\\n\") textfile.write(\"-------------------------------------------------------------\\n\") textfile.write(f\"The total number of votes cast", "cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\") textfile.write(\"------------------------------------------------------------------\\n\")", "header row first (skip this part if there is no header) poll_header =", "the file using \"write\" mode. 
Specify the variable to hold the contents with", "in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes = max(candidate_votes) max_index= candidate_votes.index(max_votes)", "total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]}", "candidate was not found in candidates_unique list then append to list and add", "percent = [] # Set path for file poll_path = os.path.join(\"Resources\", \"election_data.csv\") output_path", "row 2 of csv candidate_in = (row[2]) if candidate_in in candidates_names: candidate_index =", "= csv.reader(csvfile, delimiter=\",\") # Read the header row first (skip this part if", "with the results # Open the file using \"write\" mode. Specify the variable", "won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent) max_votes =", "candidate_in in candidates_names: candidate_index = candidates_names.index(candidate_in) candidate_votes[candidate_index] = candidate_votes[candidate_index] + 1 else: #if", "found in candidates_unique list then append to list and add 1 to vote", "each candidate won for x in range(len(candidates_names)): vote_percent = round(candidate_votes[x]/total_votes *100, 4) percent.append(vote_percent)", "textfile.write(f\"The total number of votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)):", "votes cast : {total_votes}\\n\") textfile.write(\"--------------------------------------------------------------\\n\") for x in range(len(candidates_names)): textfile.write(f\"{candidates_names[x]} : {percent[x]}% ({candidate_votes[x]})\\n\")", "# Modules import os import csv #Set the variables total_votes = 0 
total_candidates", "the header row first (skip this part if there is no header) poll_header" ]
[ "self.keys: if not key in params: raise KeyError(key) if not isinstance(params[key], expected_type): raise", "expected_type in self.keys: if not key in params: raise KeyError(key) if not isinstance(params[key],", "not key in params: raise KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {}", "raise TypeError('expected {} to be {}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self,", "class MessageData: keys = [('internal_date', datetime), ('subject', str), ('body', str), ('from', list), ('to',", "def __init__(self, params): for key, expected_type in self.keys: if not key in params:", "in self.keys: if not key in params: raise KeyError(key) if not isinstance(params[key], expected_type):", "= [('internal_date', datetime), ('subject', str), ('body', str), ('from', list), ('to', list)] def __init__(self,", "isinstance(params[key], expected_type): raise TypeError('expected {} to be {}, but got {}'.format( key, expected_type,", "keys = [('internal_date', datetime), ('subject', str), ('body', str), ('from', list), ('to', list)] def", "got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if key", "key): if key not in [x[0] for x in self.keys]: raise KeyError(key) return", "str), ('from', list), ('to', list)] def __init__(self, params): for key, expected_type in self.keys:", "from datetime import datetime class MessageData: keys = [('internal_date', datetime), ('subject', str), ('body',", "key in params: raise KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {} to", "datetime import datetime class MessageData: keys = [('internal_date', datetime), ('subject', str), ('body', str),", "datetime), ('subject', str), ('body', str), ('from', list), ('to', list)] def __init__(self, params): for", "datetime class MessageData: keys = [('internal_date', datetime), ('subject', str), ('body', str), ('from', list),", "('subject', 
str), ('body', str), ('from', list), ('to', list)] def __init__(self, params): for key,", "key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if key not in", "key not in [x[0] for x in self.keys]: raise KeyError(key) return getattr(self, key)", "for key, expected_type in self.keys: if not key in params: raise KeyError(key) if", "expected_type): raise TypeError('expected {} to be {}, but got {}'.format( key, expected_type, type(params[key])))", "if not isinstance(params[key], expected_type): raise TypeError('expected {} to be {}, but got {}'.format(", "<gh_stars>0 from datetime import datetime class MessageData: keys = [('internal_date', datetime), ('subject', str),", "MessageData: keys = [('internal_date', datetime), ('subject', str), ('body', str), ('from', list), ('to', list)]", "('body', str), ('from', list), ('to', list)] def __init__(self, params): for key, expected_type in", "if key not in [x[0] for x in self.keys]: raise KeyError(key) return getattr(self,", "type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if key not in [x[0] for", "key, params[key]) def get_value(self, key): if key not in [x[0] for x in", "list)] def __init__(self, params): for key, expected_type in self.keys: if not key in", "be {}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self,", "list), ('to', list)] def __init__(self, params): for key, expected_type in self.keys: if not", "params: raise KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {} to be {},", "__init__(self, params): for key, expected_type in self.keys: if not key in params: raise", "raise KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {} to be {}, but", "but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if", "if not key in params: raise 
KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected", "in params: raise KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {} to be", "KeyError(key) if not isinstance(params[key], expected_type): raise TypeError('expected {} to be {}, but got", "str), ('body', str), ('from', list), ('to', list)] def __init__(self, params): for key, expected_type", "import datetime class MessageData: keys = [('internal_date', datetime), ('subject', str), ('body', str), ('from',", "{}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if key not", "params): for key, expected_type in self.keys: if not key in params: raise KeyError(key)", "{} to be {}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key])", "to be {}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def", "expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key): if key not in [x[0]", "key, expected_type in self.keys: if not key in params: raise KeyError(key) if not", "get_value(self, key): if key not in [x[0] for x in self.keys]: raise KeyError(key)", "TypeError('expected {} to be {}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key,", "params[key]) def get_value(self, key): if key not in [x[0] for x in self.keys]:", "not isinstance(params[key], expected_type): raise TypeError('expected {} to be {}, but got {}'.format( key,", "('from', list), ('to', list)] def __init__(self, params): for key, expected_type in self.keys: if", "{}, but got {}'.format( key, expected_type, type(params[key]))) setattr(self, key, params[key]) def get_value(self, key):", "setattr(self, key, params[key]) def get_value(self, key): if key not in [x[0] for x", "def get_value(self, key): if key not in [x[0] for x in self.keys]: raise", "('to', list)] def __init__(self, params): 
for key, expected_type in self.keys: if not key", "[('internal_date', datetime), ('subject', str), ('body', str), ('from', list), ('to', list)] def __init__(self, params):" ]
[ "1 items = [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items", "seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products =", "_send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, )", "address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF)", "from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def", "collections import defaultdict import logging from django.conf import settings from online_payments.billing.enums import Currency", "_send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat in", "import List from collections import defaultdict import logging from django.conf import settings from", "grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name,", "from online_payments.billing.enums import Currency from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu", "return items def _send_invoice(billing_detail, email, items): customer 
= Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1,", "import Currency from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu", "payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice to: %s\", email) szamlazzhu.send_invoice(invoice, settings.SZAMLAZZHU_INVOICE_PREFIX)", "defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1 items = [] for product_type,", "= defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1 items = [] for", "invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice to: %s\",", "Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email,", "= logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def", "items = [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def", "online_payments.billing.enums import Currency from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import", "logging from django.conf import settings from online_payments.billing.enums import Currency from online_payments.billing.models import Item,", "in seats: 
grouped_products[seat.payment.product_type] += 1 items = [] for product_type, quantity in grouped_products.items():", "seats: grouped_products[seat.payment.product_type] += 1 items = [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type],", "Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger =", "Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items", "get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email,", "= [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail,", "payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def", "send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat", "_get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type]", "import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail,", "PRODUCTS, get_product_items logger = logging.getLogger(__name__) def 
send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail,", "List from collections import defaultdict import logging from django.conf import settings from online_payments.billing.enums", "Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice to: %s\", email) szamlazzhu.send_invoice(invoice,", "items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code,", "= Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD,", "<gh_stars>10-100 from typing import List from collections import defaultdict import logging from django.conf", "+= 1 items = [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return", "items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice =", "customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items,", "def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] +=", "for product_type, quantity in 
grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items):", "appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat in seats:", "django.conf import settings from online_payments.billing.enums import Currency from online_payments.billing.models import Item, PaymentMethod, Invoice,", "List[Item]: grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1 items =", "product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items): customer", "tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice", "from collections import defaultdict import logging from django.conf import settings from online_payments.billing.enums import", "from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat]))", "import settings from online_payments.billing.enums import Currency from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer", "import logging from django.conf import settings from online_payments.billing.enums import Currency from online_payments.billing.models import", "Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer)", 
"online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import", "email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending", "= Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice to: %s\", email)", "quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items): customer =", "for seat in seats: grouped_products[seat.payment.product_type] += 1 items = [] for product_type, quantity", "-> List[Item]: grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1 items", "_send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products", ") invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY, Currency.HUF) logger.info(\"Sending invoice to:", "grouped_products[seat.payment.product_type] += 1 items = [] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity))", "items def _send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email,", "from django.conf import settings from 
online_payments.billing.enums import Currency from online_payments.billing.models import Item, PaymentMethod,", "logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all()))", "def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) ->", "in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email, items): customer = Customer(", "import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS,", "post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu =", "def _send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number,", "city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu = Szamlazzhu(settings.SZAMLAZZHU_AGENT_KEY,", "seat in seats: grouped_products[seat.payment.product_type] += 1 items = [] for product_type, quantity in", 
"_get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1", "def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int) for", "Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__)", "PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger", "name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice = Invoice(items=items, payment_method=PaymentMethod.CREDIT_CARD, customer=customer) szamlazzhu", "import defaultdict import logging from django.conf import settings from online_payments.billing.enums import Currency from", "from typing import List from collections import defaultdict import logging from django.conf import", "defaultdict import logging from django.conf import settings from online_payments.billing.enums import Currency from online_payments.billing.models", "grouped_products = defaultdict(int) for seat in seats: grouped_products[seat.payment.product_type] += 1 items = []", "Currency from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from", "from online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices", "send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): 
_send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]:", "email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city, address=billing_detail.address_line1, email=email, tax_number=billing_detail.tax_number, ) invoice", "online_payments.billing.szamlazzhu import Szamlazzhu from payments.prices import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat):", "logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats)", "quantity)) return items def _send_invoice(billing_detail, email, items): customer = Customer( name=billing_detail.company_name, post_code=billing_detail.post_code, city=billing_detail.city,", "import PRODUCTS, get_product_items logger = logging.getLogger(__name__) def send_seat_invoice(seat): _send_invoice(seat.appointment.billing_detail, seat.appointment.email, _get_items_for_seats([seat])) def send_appointment_invoice(appointment):", "typing import List from collections import defaultdict import logging from django.conf import settings", "[] for product_type, quantity in grouped_products.items(): items.extend(get_product_items(PRODUCTS[product_type], quantity)) return items def _send_invoice(billing_detail, email,", "_get_items_for_seats([seat])) def send_appointment_invoice(appointment): _send_invoice(appointment.billing_detail, appointment.email, _get_items_for_seats(appointment.seats.all())) def _get_items_for_seats(seats) -> List[Item]: grouped_products = defaultdict(int)", "settings from online_payments.billing.enums import Currency from 
online_payments.billing.models import Item, PaymentMethod, Invoice, Customer from" ]
[ "histogram of velocities using a bar chart. Parameters: ---------- :param int nbin: number", "hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property", "self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t,", "dict['x'] \"\"\" # todo: test this method! look up references for clearer examples!", "np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else: mesh = self.__hdf5.get_data(key) # find center", "df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve", "genuchten 1986. Miscable displacement. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot", "be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen =", "valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr", "ncol = 0 for index, row in pdf_colloids.iterrows(): if normalize: if lower_nts <", "ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least sqares for jury", "nbin): ncol = 0 for index, row in pdf_colloids.iterrows(): if normalize: if lower_nts", ":return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance", "ncols = [] nts = [] ncol = 0 for index, row in", "t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol))", "1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)])", "i not in ('\\t', '', ' ', '\\n')]) temp = np.array(t).T temp =", "field scale model parameterization Class needs to be re-named and updated to CDE", "x = 0.5 * 
special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x def", "values Parameters: ---------- :param int nbin: number of timesteps to bin a pdf", "v in self.velocity['velocity']: if lower_v < v <= upper_v: ncol += 1 velocity.append((lower_v", "(\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor", "if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys. Use", "model parameterization Class needs to be re-named and updated to CDE equation Parameters:", "import least_squares l = self.ylen * self.resolution v = self.uy t = self.bt['nts'].as_matrix()", "optimize method to solve least sqares for jury 1991. Pulse flux. Parameters: ----------", "= self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1])", "= kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy,", "np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx] = tuple([value,", "filename: output filename (ie. endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid': np.int,", "= hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x':", "not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5)", "< v <= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v =", "statistics relating to colloid velocity for a simulation. 
Class needs to be rebuilt", "masked=False, *args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys Parameters: ---------- :param", "= [] for idx, line in enumerate(f): if idx < self.__data_startline: pass elif", "hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data", "hdf5 data by specific path Parameters: ---------- :param str path: hdf5 directory path", "'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif", "to colloid velocity for a simulation. Class needs to be rebuilt to work", "filename (ie. endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int,", "= colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # *", "Class to calculate macroscopic advection dispersion equation parameters for field scale model parameterization", "pdf, t = self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l,", "= reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous # todo:", "y * -1, *args, **kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting", "H class Breakthrough(object): \"\"\" Class to prepare and plot breakthrough curve data from", "self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation", "@property def keys(self): \"\"\" :return: list of valid hdf5 data keys \"\"\" return", "class. 
Integrate into LB class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties", "matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min - adjuster, self.max,", "**kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args,", "L - v * t) ** 2 eq3 = 4 * R *", "None: ax = plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x',", "adjuster = 0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin) ncols = []", "* factor else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to", "for LB-Colloids Parameters: ---------- :param str filename: colloid model output filename (ie. endpoint,", "to retrieve hdf5 data by dict. key Parameters: ---------- :param str key: valid", ":param int nbin: number of timesteps to bin a pdf for calculation :param", "array of velocity. Parameters ---------- :param *args: matplotlib plotting args :param **kwargs: matplotlib", "model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return data", "else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts']", "self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader", "= np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh is", "Parameters: ---------- :param str filename: ascii output file name from colloid model :param", "'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine':", "breakthrough_curve(self): \"\"\" Property method that performs 
a dynamic calculation of breakthrough curve data", "= line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip())", "'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()]", "float ftol: scipy function tolerance for solution :param int max_nfev: maximum number of", "*args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if", "of velocity vectors within the system. Parameters: ---------- :param int nbin: refinement for", "+ 1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax = mesh.max() if", "hdf5 data by dict. key Parameters: ---------- :param str key: valid dictionary key", "= self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] // 2 if key ==", "of velocities by colloid for array of velocity. Parameters ---------- :param *args: matplotlib", "Property method to return valid keys to obtain data \"\"\" return CCModelPlot.keys def", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors", "ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\" Method to return colloid velocity", "v): \"\"\" Equation for Jury 1991 calculation of Dispersivity and Retardation Parameters vars:", "= self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class to retrieve Colloid force", "data path Parameters: ---------- :param str key: lattice boltzmann data key Returns: -------", "self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time", "and plot data as 1d or as a meshgrid object More sophisticated than", "index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep))", "bar chart. 
Parameters: ---------- :param int nbin: number of specific bins for plotting", "+= 1 else: if lower_nts < row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts)", "mean velocity of each colloid in the simulation \"\"\" colloid = [] velocity", "file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice", "self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin)", "self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf, time", "- adjuster upper_v = 0 for upper_v in bins: ncol = 0 for", "float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object):", "self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\": arr", "= hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif", "= float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize", "pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Convience method to plot data into", "Method to plot breakthrough data with pore volumes (non-dimensional time) Parameters: ---------- :param", "for van genuchten 1986. Miscable displacement. 
Parameters: ---------- :param float D: Diffusivity initial", "in method to calculate the mean velocity of each colloid in the simulation", "time \"\"\" from scipy import special D = vars[0] R = vars[1] eq0", "h5py as H class Breakthrough(object): \"\"\" Class to prepare and plot breakthrough curve", "'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr =", "*args, **kwargs): \"\"\" Method to plot data into a matplotlib chart. Parameters: ----------", "vectors within the system. Parameters: ---------- :param int nbin: refinement for quiver plotting", "**kwargs) elif key == \"image\": arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr", "= colcol.T[center, center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center,", "# find center and set to nearby value to prevent log scale crashing", "------- :return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key", "parameterization Class needs to be re-named and updated to CDE equation Parameters: ----------", "import ColloidOutput >>> import matplotlib.pyplot as plt >>> >>> hdf = \"mymodel.hdf5\" >>>", "\"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return:", "to provide plotting functionality. ModelPlot and CCModelPlot are useful for visualizing colloid-surface forces", "output files <endpoint, timestep, pathline> to a pandas dataframe Parameters: ---------- :param str", "bool normalize: flag to calculate pdf by residence time or end time \"\"\"", "matplotlib bar width. 
:param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs", "(float): model resolution :ivar timestep: (float): model timestep :ivar continuous: (int): interval of", "key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key)", "1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,", "release, 0 means pulse :ivar ncol: (float): number of colloids per release in", "key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0]", "\"\"\" Method to return data by hdf5 path Parameters: ---------- :param str path:", "matplotlib axes object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read the header from ascii output", "---------- :param str path: hdf5 directory path to data Returns: ------ :return: data", "for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol']", ":param int nbin: number of time steps to base bin on :param bool", "[] velocity = [] lower_v = self.min - adjuster upper_v = 0 for", "= self.__hdf5.get_data(key) # find center and set to nearby value to prevent log", "np.int} def __init__(self, filename): self.timestep = 0 self.ncol = 0 self.resolution = 0", "Equation for Jury 1991 calculation of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity,", "to query colloid-colloid interactions and plot data as 1d or as a meshgrid", "0 self.__data_startline = 0 self.__header = [] if filename.split('.')[-1] not in ('endpoint', 'timeseries',", "* np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D * t ** 3)", "ColloidOutput >>> import matplotlib.pyplot as plt >>> >>> 
hdf = \"mymodel.hdf5\" >>> mp", "self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous # todo: replace this", "time steps to base bin on :param bool normalize: method to calculate pdf", "and kwargs!!! >>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as np import", "= nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def", "(optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" #", "self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to calculate macroscopic advection dispersion equation", "self.df = reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity =", "1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf data with pore", "**kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts']", "0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize", "pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self,", "path to data Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self,", "= self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6 y = colcol[center, center:]", "= shape[0] // 2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x =", "'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine',", "Many classes are available to provide plotting functionality. 
ModelPlot and CCModelPlot are useful", "CCModelPlot.keys def get_data(self, key): \"\"\" Method to return data by key Parameters: ----------", "if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] *", "import special D = vars[0] R = vars[1] eq0 = R * l", "by residence time or end time \"\"\" self.bin = nbin self.__normalize = normalize", "time \"\"\" self.bin = nbin self.__normalize = normalize ts = [] ncols =", "= np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else: mesh =", "= self.uy t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0", "each colloid in the simulation \"\"\" colloid = [] velocity = [] for", "breakthrough data with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args", "dictionary. Answer in dict['x'] \"\"\" from scipy.optimize import least_squares l = self.ylen *", "plot breakthrough data with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib", "---------- :param str filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe", ":param **kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time: if self.continuous:", "plot breakthrough curve data from endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint", "= {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float,", "D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least squares", "using data Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v:", "Hdf array plotting using Hdf5Reader keys Parameters: ---------- :param str key: valid dictionary", "file name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must", ">>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as np import matplotlib.pyplot as", "arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to retrieve", "= data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y',", "'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data", "max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t, v, pdf): \"\"\" Method to", "self.ncol l = self.ylen * self.resolution v = self.uy pdf, t = self.__prep_data()", "obtain data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method to return data", "self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def", "nbin, normalize=False): \"\"\" User method to reset values based on changing the pdf", "---------- :param str path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self,", "& (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve", "return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select data 
from hdf5 file", "not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename)", "in dict['x'] \"\"\" from scipy.optimize import least_squares l = self.ylen * self.resolution v", "self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf", "= [] for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) /", "colloid-colloid forces respectively. example import of the Colloid_output.py module is as follows >>>", "str filename: output filename (ie. endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid':", "'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep =", "be re-named and updated to CDE equation Parameters: ---------- :param str filename: ascii", "'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x':", "within the system. Parameters: ---------- :param int nbin: refinement for quiver plotting :param", "data key Returns: ------- :return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor", "1, a=yy) x = np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img == 1,", "---------- :param str key: valid data key :param object ax: matplotlib axes object", "self.velocity = arr @property def max(self): \"\"\" :return: maximum colloid velocity \"\"\" return", "filename (ie. endpoint, timestep, or pathline) \"\"\" with open(filename) as f: t =", "(float) time bt: (np.array) breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v,", "plotting using Hdf5Reader keys Parameters: ---------- :param str key: valid dictionary key from", "guess. 
Cannot be 0 :param float ftol: scipy function tolerance for solution :param", "or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\"", "ylen v: (float) mean fluid_velocity t: (float) time pdf: pd.dataframe c/co of colloid", "**kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm if ax is None:", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins", "endpoint file data from from ascii files for LB-Colloids Sets data to pandas", "self.__data_startline: self.__header = [i.rstrip() for i in line.split() if i not in ('\\t',", "idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self):", "'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs)", "lower_v = upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args,", "l: (float) ylen v: (float) mean fluid_velocity t: (float) time \"\"\" D =", "* L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D * t", "*args, **kwargs): \"\"\" User method to plot a histogram of velocities using a", "= 0 for v in self.velocity['velocity']: if lower_v < v <= upper_v: ncol", "3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0", "macroscopic advection dispersion equation parameters for field scale model parameterization Class needs to", "Parameters: ---------- :param bool time: if true x-axis is time, false is nts", "x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A,", "least squares kwargs Returns: ------- :return: scipy least squares dictionary. 
Answer in dict['x']", "matplotlib.pyplot as plt >>> >>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>>", "else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'],", "Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False,", "scale model parameterization Class needs to be re-named and updated to CDE equation", "= self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol']", "args :param **kwargs: matplotlib plotting kwargs \"\"\" if key not in ('col_col_x', 'col_col_y',", "retrieve Colloid force arrays and plot for data analysis. Parameters: ---------- :param str", "if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep", "'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x':", "= float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline", "= reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy =", "max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return", "key :param object ax: matplotlib axes object (optional) :param *args: matplotlib plotting args", "(float) ylen v: (float) mean fluid_velocity t: (float) time pdf: pd.dataframe c/co of", "* self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: 
plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args,", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" # todo: create a", "dynamic calculation of breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is", "interactions and plot data as 1d or as a meshgrid object More sophisticated", "arguments for 1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr']", "key from self.keys :param object ax: matplotlib pyplot axes object (optional) :param *args:", "method to view and analyze colloid force arrays Parameters: ---------- :param str key:", "else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs):", "R = vars[1] eq0 = R * l - v * t eq1", "time = self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return", "equation Parameters: ---------- :param str filename: ascii output file name from colloid model", "** 3) eq2 = -(R * L - v * t) ** 2", "matplotlib pyplot axes object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib", "self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\": arr = self.__hdf.get_data(key) if", "vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length", "key not in valid keys. Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key", "time: if true x-axis is time, false is nts :param *args: matplotlib args", "mesh[center, center] = mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0,", "about this one. Does it belong here? Finish class. 
Integrate into LB class", "LB class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties Parameters: ---------- :param", "Parameters: ---------- :param str filename: output filename (ie. endpoint, timestep, or pathline) \"\"\"", "(int): total number of colloids in simulation \"\"\" def __init__(self, filename): if not", "------- :return: data <varies> \"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key", "keys Parameters: ---------- :param str key: valid dictionary key from self.keys :param object", "not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep =", "to calculate pdf by residence time or end time \"\"\" self.bin = nbin", "self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property", "\"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center, center:] elif", "nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids =", "data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x',", "max(self): \"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\"", "bool time: if true x-axis is time, false is nts :param *args: matplotlib", "Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to return valid keys to obtain", "name from colloid model :param int nbin: number of timesteps to bin a", "not a valid key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy =", "t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen =", "import h5py as H class Breakthrough(object): \"\"\" Class to prepare and plot breakthrough", 
"y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$',", "generate a probability distribution function based upon user supplied bin size. Parameters: ----------", "hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.file_name = hdf5 @property", "key Returns: ------- :return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor =", "= bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 for index,", "plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs):", "v * t) ** 2 eq3 = 4 * R * D *", "elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split()", "None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise", "output filename (ie. 
endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag':", "'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux',", "np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int,", "filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data", "\"\"\" with open(filename) as f: t = [] for idx, line in enumerate(f):", "file name from colloid model :param int nbin: number of timesteps to bin", "for solution :param int max_nfev: maximum number of function iterations :param **kwargs: scipy", "y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] #", "R * D * t x = (eq0 / eq1) * np.exp(eq2 /", "function iterations :param **kwargs: scipy least squares kwargs Returns: ------- :return: scipy least", "== 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if masked: img", "pulse :ivar ncol: (float): number of colloids per release in simulation :ivar total_ncol:", "= 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x def __prep_data(self):", "retrieve hdf5 data by specific path Parameters: ---------- :param str path: hdf5 directory", "filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read", "**kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0,", "vmax = mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in", "in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 
'edl_fine',", "breakthrough data by stripping off trailing zeros. Returns: pdf = (np.array) stripped pdf", ":ivar df: (pandas DataFrame): dataframe of endpoint data :ivar resolution: (float): model resolution", "arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx]", "from jury 1991 equation using data Parameters vars: (np.array) [dispersivity, retardation] A: ncol", "from scipy.optimize import least_squares l = self.ylen * self.resolution v = self.uy t", "plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x", "and analyze colloid force arrays Parameters: ---------- :param str key: valid dictionary key", "find center and set to nearby value to prevent log scale crashing shape", "**kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]])", "(float) time \"\"\" from scipy import special D = vars[0] R = vars[1]", "class ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays and plot for data", "None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self,", "def keys(self): \"\"\" :return: list of valid hdf5 data keys \"\"\" return [i", "pdf = (np.array) stripped pdf t = (np.array) times \"\"\" strip_idx = None", "if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a", "files for a more precise velocity measurement Parameters: ---------- :param str filename: endpoint", "upper_v = 0 for upper_v in bins: ncol = 0 for v in", "plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key)", "mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax = mesh.max()", "for field scale model parameterization Class 
needs to be re-named and updated to", "data from from ascii files for LB-Colloids Sets data to pandas dataframe Parameters:", "None if ax is None: ax = plt.gca() if key in ('lvdw_x', 'lvdw_y',", "ax: matplotlib axes object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib", "*args, **kwargs): \"\"\" Method to create a quiver plot to display the magnitude", "Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve hdf5 data by dict. key", "hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib", "self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf", "*args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\"", "idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property def", "+ upper_v)/2.) ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity,", "for idx, line in enumerate(f): if idx < self.__data_startline: pass elif idx ==", "= reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property def", "not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.file_name = hdf5", "breakthrough curve data from endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint file", "'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in", "= ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy,", "pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs)", "Colloid_output.py module is as follows >>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot", "value to prevent log scale crashing shape = mesh.shape center = shape[0] //", "idx seq = True else: pass else: seq = False strip_idx = None", "ascii files for LB-Colloids Sets data to pandas dataframe Parameters: ---------- :param str", "df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod", "__init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader", ":ivar timestep: (float): model timestep :ivar continuous: (int): interval of continuous release, 0", "float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t =", "Retardation from breakthrough data. 
Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length", "elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key))", "np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity", "upper_nts: ncol += 1 else: if lower_nts < row['end-ts'] <= upper_nts: ncol +=", "<varies> \"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid", "eq3) x[0] = 0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs):", "Class to read in text based output files <endpoint, timestep, pathline> to a", "velocity measurement Parameters: ---------- :param str filename: endpoint file name \"\"\" def __init__(self,", "Convience method to plot data into a matplotlib chart. Parameters: ---------- :param bool", "= 0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy", "\"\"\" Equation for Jury 1991 calculation of Dispersivity and Retardation Parameters vars: (np.array)", "of variance of colloid velocities \"\"\" return (self.stdev / self.mean) * 100 def", "**kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. ax.plot([center], [center], 'ko')", "velocity of each colloid in the simulation \"\"\" colloid = [] velocity =", "chart. Parameters: ---------- :param bool time: if true x-axis is time, false is", "== 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh is not None:", "Parameters: ---------- :param str filename: endpoint file name \"\"\" def __init__(self, filename): if", "= 0 self.xlen = 0 self.ylen = 0 self.ux = 0 self.uy =", "------- :return: scipy least squares dictionary. 
Answer in dict['x'] \"\"\" from scipy.optimize import", "hdf5 directory path to data Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path)", "of colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A, L, t, v) def", "int nbin: number of specific bins for plotting :param float width: matplotlib bar", "self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'):", "\"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be", "'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape", "timestep: (float): model timestep :ivar continuous: (int): interval of continuous release, 0 means", "tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to retrieve the pore", "in self.velocity['velocity']: if lower_v < v <= upper_v: ncol += 1 velocity.append((lower_v +", "must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self,", "if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy =", "A, L, t, v): \"\"\" Equation for Jury 1991 calculation of Dispersivity and", "or pathline) \"\"\" with open(filename) as f: for idx, line in enumerate(f): if", "(np.array) stripped pdf t = (np.array) times \"\"\" strip_idx = None seq =", "None: pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1] else: pdf", "key): \"\"\" Method to retrieve hdf5 data by dict. 
key Parameters: ---------- :param", "R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def", "temp = {self.__header[idx]: data for idx, data in enumerate(temp)} df = pd.DataFrame(temp) df", "= [] nts = [] ncol = 0 ncol_per_release = [] for index,", "key == \"image\": arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0,", "self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting method for 1d colloid-colloid dlvo", "'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y',", "import leastsq, minimize, least_squares a = self.ncol l = self.ylen * self.resolution v", "mean(self): \"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\"", "\"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return:", "---------- :param str filename: output filename (ie. endpoint, timestep, or pathline) \"\"\" dtypes", "key: valid data key :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "eq1) * np.exp(eq2 / eq3) x[0] = 0 return x def solve_van_genuchten_1986(self, D=0.01,", "[dispersivity, retardation] x: (float) column length v: (float) mean fluid velocity t: (float)", "p class ColloidVelocity(object): \"\"\" Method to return colloid velocity and statistics relating to", "<= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts", "specific bins for plotting :param float width: matplotlib bar width. 
:param *args: matplotlib", "= row['end-ts'] - self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) &", "raise KeyError(\"{} is not a valid key\".format(key)) if key == 'col_col': ccx =", "def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion for plotting", "R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self,", "DataFrame): dataframe of endpoint data :ivar resolution: (float): model resolution :ivar timestep: (float):", "if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax = mesh.max() if 'vmin' in", "of breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if", "'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def", "self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol),", "ModelPlot Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x':", "upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in enumerate(ts):", "line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution", "elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # *", "= df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod def __try_float(val): try: return", "nts = [] ncol = 0 ncol_per_release = [] for index, row in", "'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return 
data", "ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol')", "enumerate(self.pdf): if not bt: if rec['ncol'] != 0: bt = True else: pass", "masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr,", "plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough data with pore volumes (non-dimensional", "kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\"", "width=0.01, *args, **kwargs): \"\"\" User method to plot a histogram of velocities using", "true x-axis is time, false is nts :param *args: matplotlib args for 1d", "= idx seq = True else: pass else: seq = False strip_idx =", "------- :return: scipy least squares dictionary. Answer in dict['x'] \"\"\" # todo: test", "arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1,", "'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I',", "**kwargs): \"\"\" Scipy optimize method to solve least sqares for jury 1991. Pulse", "raise KeyError(\"{} is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape", "and plot for data analysis. 
Parameters: ---------- :param str hdf5: hdf5 file name", "get_data(self, key): \"\"\" Get data method to view and analyze colloid force arrays", "= [] velocity = [] for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen", "ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh =", "= self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\"", "r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query", "mesh = None if ax is None: ax = plt.gca() if key in", "' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i not in", "valid hdf5 data keys \"\"\" return [i for i in Hdf5Reader.data_paths] def get_data(self,", "def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to plot a histogram", "*args, **kwargs): \"\"\" Method to plot distribution of velocities by colloid for array", "/ (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for", "'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax')", "(abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args,", "for <NAME> and Winerega 1986 to calculate Dispersivity and Retardation from breakthrough data.", "of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of", "<NAME> and Winerega 1986 to calculate Dispersivity and Retardation from breakthrough data. 
Parameters:", "velocities \"\"\" return (self.stdev / self.mean) * 100 def plot(self, *args, **kwargs): \"\"\"", "= R * l - v * t eq1 = np.sqrt(4 * D", "= {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy',", "1, a=x) y = np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin,", "'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args,", "\"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs):", "data analysis. Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" def __init__(self,", "fluid_velocity t: (float) time \"\"\" D = vars[0] R = vars[1] eq0 =", "/ float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol),", "coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions", ":param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10,", "reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a probability distribution function based upon", "return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create", "or pathline) \"\"\" with open(filename) as f: t = [] for idx, line", "plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if", "Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" def __init__(self, hdf5): if", "classes to read 
LB Colloids simulation outputs and perform post processing. Many classes", "self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf", "(float) mean fluid velocity t: (float) time \"\"\" from scipy import special D", "t eq1 = np.sqrt(4 * D * R * t) x = 0.5", "be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to return", "'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self,", "the header from ascii output files for LB-Colloids Parameters: ---------- :param str filename:", "the user to use keys to access data Parameters: ---------- :param str hdf5:", "ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous", "np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int}", "if lower_nts < row['delta-ts'] <= upper_nts: ncol += 1 else: if lower_nts <", "0: bt = True else: pass else: if rec['ncol'] == 0: if not", "self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3]", "0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for", "upper_v)/2.) 
ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols,", "= None self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property method that performs", "a meshgrid object More sophisticated than standard ModelPlot Parameters: ---------- :param str hdf5:", "data keys \"\"\" return [i for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\"", "not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5)", "to work with timeseries and pathline files for a more precise velocity measurement", "+ ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh", "0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and plot data as", "of data path Parameters: ---------- :param str key: lattice boltzmann data key Returns:", "reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin", "if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol,", "be 0 :param float R: Retardation initial guess. Cannot be 0 :param float", "to CDE equation Parameters: ---------- :param str filename: ascii output file name from", "'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x':", "to bin a pdf for calculation :param bool normalize: flag to calculate pdf", "Method to return colloid velocity and statistics relating to colloid velocity for a", "keys \"\"\" return [i for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method", "of velocities using a bar chart. 
Parameters: ---------- :param int nbin: number of", "plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] *", "valid data key :param object ax: matplotlib axes object (optional) :param *args: matplotlib", "if rec['ncol'] != 0: bt = True else: pass else: if rec['ncol'] ==", "R * t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0", "needs to be re-named and updated to CDE equation Parameters: ---------- :param str", "\"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs)", "data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\"", "to display the magnitude and direction of velocity vectors within the system. Parameters:", "= np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else: mesh = self.__hdf5.get_data(key) # find", "center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if", "* self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to calculate", "Finish class. 
Integrate into LB class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid", "self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get", "key Parameters: ---------- :param str key: valid dictionary key from self.keys Returns: -------", "---------- :param str key: valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path):", "max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a probablity", "R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least sqares for", "time bt: (np.array) breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t)", "= self.ncol l = self.ylen * self.resolution v = self.uy pdf, t =", ":param **kwargs: matplotlib plotting kwargs \"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x',", "== 1, a=xx) yy = np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img ==", "np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min()", "'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x':", "<= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df", "return data def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific", "', '\\n')]) temp = np.array(t).T temp = {self.__header[idx]: data for idx, data in", "nts = [] ncol = 0 for index, row in bt_colloids.iterrows(): ncol +=", "Built in method to calculate the mean velocity of each colloid in the", "data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select data", "t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\" 
Equation for <NAME> and Winerega", "None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if", "\"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied')", "calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar resolution:", "elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y',", "self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else:", "filename: endpoint file name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint", "later! self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader", "'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr", "mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return p", "df @staticmethod def __try_float(val): try: return float(val) except ValueError: return float('nan') class Hdf5Reader(object):", "KeyError('{}: key not valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis", "ax.imshow(arr, *args, **kwargs) if mesh is not None: return mesh else: return ax", "else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs):", "return df @staticmethod def __try_float(val): try: return float(val) except ValueError: return float('nan') class", "line.split() if i not in ('\\t', '', ' ', '\\n')]) temp = np.array(t).T", "performs a dynamic calculation of breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if", "self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) + 1,", "pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor *", "of timesteps to bin a pdf for calculation \"\"\" def __init__(self, filename, nbin=1000):", "for idx, rec in enumerate(self.pdf): if not bt: if rec['ncol'] != 0: bt", "data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting method", "xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10:", "None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols", "solution :param int max_nfev: maximum number of function iterations :param **kwargs: scipy least", "= int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx + 1 break else: pass def", "return [i for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve", "to bin a pdf for calculation \"\"\" def 
__init__(self, filename, nbin=1000): if not", "df.set_index('colloid') return df @staticmethod def __try_float(val): try: return float(val) except ValueError: return float('nan')", "self.uy = 0 self.velocity_factor = 1. self.continuous = 0 self.__data_startline = 0 self.__header", "**kwargs): \"\"\" User method to plot a histogram of velocities using a bar", "valid dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key)", "mean fluid velocity t: (float) time bt: (np.array) breakthrough curve \"\"\" return bt", "name :param int nbin: number of bins for pdf calculation Attributes: ---------- :ivar", "Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs)", "in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if", "else: if lower_nts < row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts", "plt import pandas as pd import h5py as H class Breakthrough(object): \"\"\" Class", "/ float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs)", "hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property", "float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t =", "as plt >>> >>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> #", "for a more precise velocity measurement Parameters: ---------- :param str filename: endpoint file", "---------- :param str path: hdf5 directory path to data Returns: ------- :return: data", "*args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self,", "('\\t', '', ' ', '\\n')]) temp = np.array(t).T temp = 
{self.__header[idx]: data for", "processing. Many classes are available to provide plotting functionality. ModelPlot and CCModelPlot are", "None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or", "Hdf5Reader(object): \"\"\" Reader object to read in HDF5 stored outputs from colloid models.", "bins: ncol = 0 for v in self.velocity['velocity']: if lower_v < v <=", "nbin: number of time steps to base bin on :param bool normalize: method", "= H.File(self.file_name, 'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key ==", "velocity = [] lower_v = self.min - adjuster upper_v = 0 for upper_v", "line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"):", "def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve", "path Parameters: ---------- :param str path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path)", "'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\", "hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data", "options mesh = None if ax is None: ax = plt.gca() if key", "dict. 
key Parameters: ---------- :param str key: valid dictionary key from self.keys Returns:", "pdf: pd.dataframe c/co of colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A, L,", "= 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts')", "x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol,", "nts :param *args: matplotlib args for 1d charts :param **kwargs: matplotlib keyword arguments", "are useful for visualizing colloid-surface forces and colloid-colloid forces respectively. example import of", "by specific path Parameters: ---------- :param str path: hdf5 directory path to data", "vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else:", "self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func", "def plot(self, key, *args, **kwargs): \"\"\" Plotting method for 1d colloid-colloid dlvo profiles", "mesh = ccx + ccy else: mesh = self.__hdf5.get_data(key) # find center and", "= self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) +", "equation parameters for field scale model parameterization Class needs to be re-named and", "pdf - self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars, A, L, t,", "self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\"", "l, v, t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation for <NAME>", "self.keys :param object ax: matplotlib pyplot axes object (optional) :param *args: matplotlib plotting", "if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx =", "from matplotlib.colors import LogNorm if ax is None: ax = plt.gca() if key", "**kwargs) plt.ylim([0, 
1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class", "in line.split() if i not in ('\\t', '', ' ', '\\n')]) temp =", "\"\"\" User method to plot a histogram of velocities using a bar chart.", "coeficient of variance of colloid velocities \"\"\" return (self.stdev / self.mean) * 100", "i in line.split() if i not in ('\\t', '', ' ', '\\n')]) temp", "plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0,", "reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property def breakthrough_curve(self):", "xx = np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img == 1, a=yy) x", "Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be 0 :param float", "vars, l, v, t): \"\"\" Equation for <NAME> and Winerega 1986 to calculate", "'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method", "ncol l: (float) ylen v: (float) mean fluid_velocity t: (float) time pdf: pd.dataframe", "get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific hdf5 path Parameters:", "Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v: (float)", "1986. Miscable displacement. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be", "colcol = self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] // 2 if key", "pandas dataframe Parameters: ---------- :param str filename: colloid model output filename (ie. endpoint,", "display the magnitude and direction of velocity vectors within the system. 
Parameters: ----------", "'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y':", "Method to return data by key Parameters: ---------- :param str key: valid model", "t: (float) time \"\"\" from scipy import special D = vars[0] R =", "== 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key", "R: Retardation initial guess. Cannot be 0 :param float ftol: scipy function tolerance", "adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think", "bool normalize: method to calculate pdf by residence time or end time \"\"\"", "== 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif", "elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split()", "plotting functionality. ModelPlot and CCModelPlot are useful for visualizing colloid-surface forces and colloid-colloid", "HDF5 stored outputs from colloid models. Contains a data_paths dictionary which allows the", "based upon user supplied bin size. 
Parameters: ---------- :param int nbin: number of", "colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs) def plot_mesh(self, key, ax=None, *args,", "return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Convience method to plot data", "line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol", "'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col':", "calculate pdf by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf =", "Method to plot pdf data with pore volumes (non-dimensional time) Parameters: ---------- :param", "time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts']", "self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars, A, L, t, v): \"\"\"", "supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann data keys", "Property method that performs a dynamic calculation of breakthrough curve data \"\"\" max_ts", "l = self.ylen * self.resolution v = self.uy t = self.bt['nts'].as_matrix() * self.timestep", "= self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs)", ":return: scipy least squares dictionary. 
Answer in dict['x'] \"\"\" from scipy.optimize import least_squares", "= 0 ncol_per_release = [] for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts']", "with open(filename) as f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t =", "'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x',", "Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen", "to plot data into a matplotlib chart. Parameters: ---------- :param bool time: if", "kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x =", "self.__breakthrough_curve = None self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property method that", "args for 1d plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor", "mesh is not None: return mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True,", "kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh,", "'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep = 0 self.ncol = 0", "data by key Parameters: ---------- :param str key: valid model key \"\"\" return", "colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation of", "self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'):", "- self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation", "self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6 y = colcol[center, center:] plt.plot(x,", "velocity. 
Parameters ---------- :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs", "to obtain data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method to return", "kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep,", "solve least squares for van genuchten 1986. Miscable displacement. Parameters: ---------- :param float", "supplied') self.file_name = hdf5 @property def keys(self): \"\"\" :return: list of valid hdf5", "colloid = [] velocity = [] for index, row in self.df.iterrows(): if np.isnan(row['y-position']):", "path: hdf5 directory path to data Returns: ------ :return: data <varies> \"\"\" hdf", "line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif", "self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property def breakthrough_curve(self): \"\"\"", "of colloids in simulation :ivar pdf: (np.recarray) colloid probability distribution function \"\"\" def", "= vars[1] eq0 = (A * L * np.sqrt(R)) eq1 = 2 *", "function of colloid breakthrough from endpoint files. Parameters: ---------- :param str filename: <>.endpoint", "key :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if", "line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif", "for a simulation. 
Class needs to be rebuilt to work with timeseries and", "return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method", "0 self.resolution = 0 self.xlen = 0 self.ylen = 0 self.ux = 0", "self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args,", "nbin: number of bins for pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame):", "'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous':", "2 eq3 = 4 * R * D * t x = (eq0", "break else: pass def read_ascii(self, filename): \"\"\" Method to read endpoint file data", "examples! from scipy.optimize import leastsq, minimize, least_squares a = self.ncol l = self.ylen", "'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'):", "total number of colloids in simulation :ivar pdf: (np.recarray) colloid probability distribution function", "filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else:", "FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename):", "'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data", "in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis =", ":return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method", "(ie. 
endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts':", "= upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in", "data by stripping off trailing zeros. Returns: pdf = (np.array) stripped pdf t", "def plot(self, time=True, *args, **kwargs): \"\"\" Method to plot data into a matplotlib", "idx, line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif", "key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid", "ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays and plot for data analysis.", "key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key)) elif", "range(0, int(max_ts) + 1, nbin): ncol = 0 for index, row in pdf_colloids.iterrows():", "self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\"", "---------- :param str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\",", "up references for clearer examples! from scipy.optimize import leastsq, minimize, least_squares a =", "0 for v in self.velocity['velocity']: if lower_v < v <= upper_v: ncol +=", "f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep =", "np.float)]) for idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr", "/ float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class", "for plotting colloids. 
\"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution)", "- v * t) ** 2 eq3 = 4 * R * D", "data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\", "line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen", "mesh = ax.imshow(arr, *args, **kwargs) if mesh is not None: return mesh else:", "try: return float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object to", "\"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of colloid velocities \"\"\"", "nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def reset_pdf(self,", "simulation. Class needs to be rebuilt to work with timeseries and pathline files", "to be rebuilt to work with timeseries and pathline files for a more", "float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline =", "output filename (ie. 
endpoint, timestep, or pathline) \"\"\" with open(filename) as f: t", "center:] plt.plot(x, y * -1, *args, **kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs):", "== 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data", "timesteps to bin a pdf for calculation \"\"\" def __init__(self, filename, nbin=1000): if", "# * 1e-6 y = colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs)", "= np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img == 1, a=x) y =", "*args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0,", "* self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method to plot", "nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts':", "self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to read in text based output", "is not None: pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1]", "pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a probablity distribution function", "* self.resolution v = self.uy t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix()", "(A * L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D *", ">>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts", "resolution: (float): model resolution :ivar timestep: (float): model timestep :ivar continuous: (int): interval", "**kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y')", "= plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 
'col_col_fine_y'): raise", "properties Parameters: ---------- :param str hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x':", "to pandas dataframe Parameters: ---------- :param str filename: colloid model output filename (ie.", "def plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough data with pore volumes", "self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read the header", "'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number',", "colloid-colloid interactions and plot data as 1d or as a meshgrid object More", "in enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return", "normalize: flag to calculate pdf by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin,", "np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if masked:", "in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve hdf5 data by dict.", ":param str hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y',", "return pdf, time class ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays and", "keys(self): \"\"\" :return: list of valid hdf5 data keys \"\"\" return [i for", "\"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux',", "vars, A, L, t, v, pdf): \"\"\" Method to estimate residuals from jury", "size. 
Parameters: ---------- :param int nbin: number of time steps to base bin", "== 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\", "* R * t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] =", "Method to retrieve hdf5 data by specific hdf5 path Parameters: ---------- :param str", "reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def", "Equation for <NAME> and Winerega 1986 to calculate Dispersivity and Retardation from breakthrough", "squares dictionary. Answer in dict['x'] \"\"\" from scipy.optimize import least_squares l = self.ylen", "off trailing zeros. Returns: pdf = (np.array) stripped pdf t = (np.array) times", "x = x[center, center:] y = colcol[center, center:] elif key == \"col_col_y\": x", "create a function_fmt for axis options mesh = None if ax is None:", "= {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x',", "maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid", "normalize=False): \"\"\" User method to reset values based on changing the pdf bin", "'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def", "file data from from ascii files for LB-Colloids Sets data to pandas dataframe", "user to use keys to access data Parameters: ---------- :param str hdf5: LB-Colloid", "'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None,", "key == 'col_col_fine': ccx = 
np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx +", "hdf5 file based on key, instead of data path Parameters: ---------- :param str", ":param float R: Retardation initial guess. Cannot be 0 :param float ftol: scipy", "colloids per release in simulation :ivar total_ncol: (int): total number of colloids in", "l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t,", "0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to", "x = x[center, center:] # * 1e-6 y = colcol[center, center:] plt.plot(x, y", "Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1]", "data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path):", "= 0 for index, row in pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts']", "line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy", "= np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img == 1, a=yy) x =", "'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and", "[i.rstrip() for i in line.split() if i not in ('\\t', '', ' ',", "this method! look up references for clearer examples! 
from scipy.optimize import leastsq, minimize,", "data class ASCIIReader(object): \"\"\" Class to read in text based output files <endpoint,", "[] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids", "'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None}", "line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen", "sqares for jury 1991. Pulse flux. Parameters: ---------- :param float D: Diffusivity initial", "and CCModelPlot are useful for visualizing colloid-surface forces and colloid-colloid forces respectively. example", "{'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter':", "call with something from the header later! self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0])", "\"\"\" Reader object to read in HDF5 stored outputs from colloid models. 
Contains", "text based output files <endpoint, timestep, pathline> to a pandas dataframe Parameters: ----------", "1991 calculation of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol", "line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous", "to plot breakthrough data with pore volumes (non-dimensional time) Parameters: ---------- :param *args:", "hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys", "pdf for calculation :param bool normalize: flag to calculate pdf by residence time", "R = vars[1] eq0 = (A * L * np.sqrt(R)) eq1 = 2", "('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i", "Contains a data_paths dictionary which allows the user to use keys to access", "self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def", "number of function iterations :param **kwargs: scipy least squares kwargs Returns: ------- :return:", "in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) colcol", "**kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method for 2d representation", "'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename):", "file name :param int nbin: number of bins for pdf calculation Attributes: ----------", "= vars[0] R = vars[1] eq0 = R * l - v *", "function_fmt for axis options mesh = None if ax is None: ax =", "return data by key Parameters: ---------- :param str key: valid model key \"\"\"", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if key not in", "def __jury_residuals(self, vars, A, L, t, v, pdf): \"\"\" Method to estimate 
residuals", "Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User", "interval of continuous release, 0 means pulse :ivar ncol: (float): number of colloids", "0 means pulse :ivar ncol: (float): number of colloids per release in simulation", "plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol),", "np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs)", "ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume", "seq = False strip_idx = None if strip_idx is not None: pdf =", "yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked: img =", "not in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split()", "('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf =", "line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif", "'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y':", "import numpy as np import matplotlib.pyplot as plt import pandas as pd import", "ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method to", "center and set to nearby value to prevent log scale crashing shape =", "= reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self):", "lower_nts < row['delta-ts'] <= 
upper_nts: ncol += 1 else: if lower_nts < row['end-ts']", "a data_paths dictionary which allows the user to use keys to access data", "def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied')", "= [] ncols = [] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids =", "Parameters: ---------- :param str path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def", "key: valid dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" if", "str path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args,", "(float) mean fluid velocity t: (float) time bt: (np.array) breakthrough curve \"\"\" return", "pathline> to a pandas dataframe Parameters: ---------- :param str filename: output filename (ie.", "anaylze LB fluid/solid properties Parameters: ---------- :param str hdf: hdf5 output filename \"\"\"", "calculation \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must", "= x.T[center, center:] y = colcol.T[center, center:] elif key == \"col_col_fine_x\": x =", "[] velocity = [] for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen *", "self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0,", "* -1, *args, **kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method", "pdf by residence time or end time \"\"\" self.bin = nbin self.__normalize =", "data key :param object ax: matplotlib axes object (optional) :param *args: matplotlib plotting", "self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read the header from", "= np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin =", 
"float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args,", "== 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin],", "1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol),", "+= 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v +", "pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args for 1d plotting", "velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid velocity \"\"\"", "KeyError('Dictionary key not in valid keys. Use get_data_by_path') hdf = H.File(self.file_name, 'r') if", "'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not", "= hdf5 @property def keys(self): \"\"\" :return: list of valid hdf5 data keys", "for Jury 1991 calculation of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation]", "data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x':", "10: vmin = mesh.min() vmax = mesh.max() if 'vmin' in kwargs: vmin =", "as pd import h5py as H class Breakthrough(object): \"\"\" Class to prepare and", "D = vars[0] R = vars[1] eq0 = (A * L * np.sqrt(R))", "bin a pdf for calculation :param bool normalize: flag to calculate pdf by", "\"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by", "'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 
'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\", "pdf bin values Parameters: ---------- :param int nbin: number of timesteps to bin", "'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None,", "bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 ncol_per_release", "('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key)) elif key in", "'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x':", "using Hdf5Reader keys Parameters: ---------- :param str key: valid dictionary key from self.keys", "-(R * L - v * t) ** 2 eq3 = 4 *", ":param str filename: <>.endpoint file name :param int nbin: number of bins for", "def reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset values based on changing", "force arrays Parameters: ---------- :param str key: valid dictionary key from self.keys Returns:", "center:] # * 1e-6 y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x", "with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args for 1d", "not None: return mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args,", "int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx + 1 break else: pass def read_ascii(self,", "a bar chart. 
Parameters: ---------- :param int nbin: number of specific bins for", "int nbin: refinement for quiver plotting :param *args: matplotlib plotting args :param **kwargs:", "reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset values based on changing the", "reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\"", "= self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class to", "bt: (np.array) breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t) def", "<= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v = upper_v -", "l, v, t): \"\"\" Equation for <NAME> and Winerega 1986 to calculate Dispersivity", "in the simulation \"\"\" colloid = [] velocity = [] for index, row", ":param str path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key,", "else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts", "Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to", "'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep = 0 self.ncol", "*args, **kwargs): \"\"\" Plotting method for 2d representation of colloid-colloid dlvo profiles. 
Parameters:", "ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df", "**kwargs): \"\"\" Method to plot breakthrough data with pore volumes (non-dimensional time) Parameters:", "valid key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh", "Sets data to pandas dataframe Parameters: ---------- :param str filename: colloid model output", "plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\"", "ASCIIReader(object): \"\"\" Class to read in text based output files <endpoint, timestep, pathline>", "int nbin: number of timesteps to bin a pdf for calculation \"\"\" def", "retardation] x: (float) column length v: (float) mean fluid velocity t: (float) time", "dataframe Parameters: ---------- :param str filename: output filename (ie. endpoint, timestep, or pathline)", "= \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args", "time = self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class to retrieve Colloid", "- adjuster, self.max, nbin) ncols = [] velocity = [] lower_v = self.min", "\"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y = colcol.T[center, center:] elif", "float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t =", "hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y',", "base bin on :param bool normalize: method to calculate pdf by residence time", "User method to plot a histogram of velocities using a bar chart. 
Parameters:", "= pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] ==", "= reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin)", "for clearer examples! from scipy.optimize import leastsq, minimize, least_squares a = self.ncol l", ":param **kwargs: matplotlib plotting kwargs \"\"\" # todo: create a function_fmt for axis", "/ self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to calculate macroscopic advection dispersion", "l = self.ylen * self.resolution v = self.uy pdf, t = self.__prep_data() x0", "= hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5", "x: (float) column length v: (float) mean fluid velocity t: (float) time bt:", "+= 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol',", "in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args,", "2d representation of colloid-colloid dlvo profiles. 
Parameters: ---------- :param str key: valid data", "FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution =", "= [] velocity = [] lower_v = self.min - adjuster upper_v = 0", "vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float) mean fluid velocity", "self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)])", "column length v: (float) mean fluid velocity t: (float) time bt: (np.array) breakthrough", "if rec['ncol'] == 0: if not seq: strip_idx = idx seq = True", "must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to", "'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename)", "args for 1d charts :param **kwargs: matplotlib keyword arguments for 1d charts \"\"\"", "from endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint file Attributes: ---------- :ivar", "int max_nfev: maximum number of function iterations :param **kwargs: scipy least squares kwargs", "reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a probability distribution function", "stripped pdf t = (np.array) times \"\"\" strip_idx = None seq = False", "1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh is not None: return", "Parameters: ---------- :param str filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame):", "Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys()", "= ccx + ccy else: mesh = self.__hdf5.get_data(key) # find center and set", "= self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in", "self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx =", "df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag']", "= np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs)", "if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is", "'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y':", "from self.keys Returns: ------- :return: data <varies> \"\"\" if key not in Hdf5Reader.data_paths:", "* pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * 
pv_factor", "np.sqrt(np.pi * D * t ** 3) eq2 = -(R * L -", "v * t eq1 = np.sqrt(4 * D * R * t) x", "self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the pore", "simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be", "valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\"", "*args, **kwargs) # todo: think about this one. Does it belong here? Finish", "belong here? Finish class. Integrate into LB class LBOutput(object): \"\"\" Class to anaylze", "directory path to data Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def", "plot(self, time=True, *args, **kwargs): \"\"\" Method to plot data into a matplotlib chart.", "deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient", "t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous =", "endpoint files. Parameters: ---------- :param str filename: <>.endpoint file name :param int nbin:", "1986 to calculate Dispersivity and Retardation from breakthrough data. Parameters: vars: (np.array) [dispersivity,", "self.ylen * self.resolution v = self.uy pdf, t = self.__prep_data() x0 = np.array([D,", "'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x':", "t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx + 1", "using a bar chart. 
Parameters: ---------- :param int nbin: number of specific bins", "self.__data_startline = 0 self.__header = [] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'):", "x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0,", "t.append([self.__try_float(i.rstrip()) for i in line.split() if i not in ('\\t', '', ' ',", "valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return", "= arr @property def max(self): \"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max()", "scipy function tolerance for solution :param int max_nfev: maximum number of function iterations", "\"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs)", "= np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if", "__van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method to estimate residuals from vanGenuchten", "= np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx] =", "adjuster, self.max, nbin) ncols = [] velocity = [] lower_v = self.min -", "path Parameters: ---------- :param str path: hdf5 directory path to data Returns: ------", "('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) colcol =", "'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data =", "HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting", "else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6 y =", ":ivar resolution: (float): model resolution :ivar timestep: (float): model 
timestep :ivar continuous: (int):", "xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy)", "1991. Pulse flux. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be", "filename (ie. endpoint, timestep, or pathline) \"\"\" with open(filename) as f: for idx,", "\"\"\" Method to generate a probability distribution function based upon user supplied bin", "upper_nts in range(0, int(max_ts) + 1, nbin): ncol = 0 for index, row", "data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key)", "plot data as 1d or as a meshgrid object More sophisticated than standard", "return float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object to read", "{'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model':", "v, t): \"\"\" Equation for <NAME> and Winerega 1986 to calculate Dispersivity and", "be rebuilt to work with timeseries and pathline files for a more precise", "* np.sqrt(np.pi * D * t ** 3) eq2 = -(R * L", "** 2 eq3 = 4 * R * D * t x =", "self.__header = [i.rstrip() for i in line.split() if i not in ('\\t', '',", "must be supplied') self.file_name = hdf5 @property def keys(self): \"\"\" :return: list of", "method to plot a histogram of velocities using a bar chart. Parameters: ----------", "filename: ascii output file name from colloid model :param int nbin: number of", "force arrays and plot for data analysis. 
Parameters: ---------- :param str hdf5: hdf5", "self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf),", "= self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = []", "x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y = colcol.T[center, center:] elif key", "np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key == 'col_col_fine':", "for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip())", "= line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip())", "arr = np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh", "colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid velocity", "object to read in HDF5 stored outputs from colloid models. Contains a data_paths", "files. Parameters: ---------- :param str filename: <>.endpoint file name :param int nbin: number", "enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to", "as a meshgrid object More sophisticated than standard ModelPlot Parameters: ---------- :param str", "colloid velocity and statistics relating to colloid velocity for a simulation. 
Class needs", "0 for upper_v in bins: ncol = 0 for v in self.velocity['velocity']: if", "data <varies> \"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in", "self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset values", "= self.min - adjuster upper_v = 0 for upper_v in bins: ncol =", "number of timesteps to bin a pdf for calculation \"\"\" def __init__(self, filename,", "to return data by hdf5 path Parameters: ---------- :param str path: valid HDF5", "clearer examples! from scipy.optimize import leastsq, minimize, least_squares a = self.ncol l =", "* l - v * t eq1 = np.sqrt(4 * D * R", "np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def", "supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen", "Parameters: ---------- :param str key: valid data key :param object ax: matplotlib axes", "= line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip())", "Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float)", "dataframe of endpoint data :ivar resolution: (float): model resolution :ivar timestep: (float): model", "or hdf5 file must be supplied') self.file_name = hdf5 @property def keys(self): \"\"\"", "dict['x'] \"\"\" from scipy.optimize import least_squares l = self.ylen * self.resolution v =", "'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity',", "key from self.keys Returns: ------- :return: data <varies> \"\"\" if key not in", "\"\"\" Plotting method for 1d colloid-colloid dlvo profiles Parameters: ---------- 
:param str key:", "bins for pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint", "* self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a probablity distribution function of", "is not None: return mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False,", "raise KeyError('{}: key not valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'):", "return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of variance of colloid velocities", "pore volume calculation conversion for plotting colloids. \"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor)", "== \"image\": arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0, a=arr)", "yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9,", "for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve hdf5 data", ">>> import matplotlib.pyplot as plt >>> >>> hdf = \"mymodel.hdf5\" >>> mp =", "something from the header later! 
self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve =", "A, L, t, v, pdf): \"\"\" Method to estimate residuals from jury 1991", "file name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise", "get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific path Parameters: ----------", "units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0,", "== \"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y = colcol.T[center, center:]", "advection dispersion equation parameters for field scale model parameterization Class needs to be", "colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6", "= bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 ncol_per_release =", "pathline) \"\"\" with open(filename) as f: t = [] for idx, line in", "perform post processing. Many classes are available to provide plotting functionality. ModelPlot and", "for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor *", "Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar resolution: (float):", "t: (float) time pdf: pd.dataframe c/co of colloid pdf \"\"\" return pdf -", "to estimate residuals from vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation]", "mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax = mesh.max() if 'vmin' in kwargs:", "model output filename (ie. endpoint, timestep, or pathline) \"\"\" with open(filename) as f:", "hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None,", "contains classes to read LB Colloids simulation outputs and perform post processing. 
Many", "\"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return data by hdf5", "*args, **kwargs): \"\"\" Method to plot pdf data with pore volumes (non-dimensional time)", "the system. Parameters: ---------- :param int nbin: refinement for quiver plotting :param *args:", "function \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must", "* D * R * t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]):", "enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property def max(self): \"\"\" :return:", "if i not in ('\\t', '', ' ', '\\n')]) temp = np.array(t).T temp", "np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep", "time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts']", "if not bt: if rec['ncol'] != 0: bt = True else: pass else:", "Method to plot distribution of velocities by colloid for array of velocity. Parameters", "self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to calculate macroscopic", "seq: strip_idx = idx seq = True else: pass else: seq = False", "it belong here? Finish class. 
Integrate into LB class LBOutput(object): \"\"\" Class to", "colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of variance", "def max(self): \"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self):", "'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x',", "x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01,", "+ 1] else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class", "**kwargs: matplotlib plotting kwargs \"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'):", "except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object to read in HDF5", "read LB Colloids simulation outputs and perform post processing. 
Many classes are available", "if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y", "if ax is None: ax = plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x',", "line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif", "\"\"\" Method to retrieve hdf5 data by specific path Parameters: ---------- :param str", "distribution function \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file", "\"\"\" User method to reset values based on changing the pdf bin values", "== 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol =", "+ 1] time = self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time =", "hdf5 output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y':", "or end time \"\"\" self.bin = nbin self.__normalize = normalize ts = []", "= DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def", "must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen", "v: (float) mean fluid_velocity t: (float) time \"\"\" D = vars[0] R =", "not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key))", "mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax", "seq = True else: pass else: seq = False strip_idx = None if", "'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data =", "= (A * L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D", "ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) 
mesh = ccx + ccy else: mesh = self.__hdf5.get_data(key) #", "hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y':", "row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1", "np.array(t).T temp = {self.__header[idx]: data for idx, data in enumerate(temp)} df = pd.DataFrame(temp)", "plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\"", "*args, **kwargs) class ADE(object): \"\"\" Class to calculate macroscopic advection dispersion equation parameters", "file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def", "in dict['x'] \"\"\" # todo: test this method! look up references for clearer", "plotting kwargs \"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{}", "[] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported", "ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts,", "classes are available to provide plotting functionality. ModelPlot and CCModelPlot are useful for", "rebuilt to work with timeseries and pathline files for a more precise velocity", "plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf data with", "in range(0, int(max_ts) + 1, nbin): ncol = 0 for index, row in", "\"\"\" Class to retrieve Colloid force arrays and plot for data analysis. 
Parameters:", "in line.split() if i not in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip())", "ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width,", "hdf5 path Parameters: ---------- :param str path: hdf5 directory path to data Returns:", "*args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. ax.plot([center], [center],", "= [] nts = [] ncol = 0 for index, row in bt_colloids.iterrows():", "__init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'],", "look up references for clearer examples! from scipy.optimize import leastsq, minimize, least_squares a", "endpoint file name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file", "'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5):", "= vars[1] eq0 = R * l - v * t eq1 =", "DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self,", "= colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs) def plot_mesh(self, key, ax=None,", "@property def var(self): \"\"\" :return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property", "key, ax=None, *args, **kwargs): \"\"\" Plotting method for 2d representation of colloid-colloid dlvo", "\"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting method for 1d", "ascii output files for LB-Colloids 
Parameters: ---------- :param str filename: colloid model output", "cv(self): \"\"\" :return: coeficient of variance of colloid velocities \"\"\" return (self.stdev /", "hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key", "str path: hdf5 directory path to data Returns: ------ :return: data <varies> \"\"\"", "(self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method to", "not seq: strip_idx = idx seq = True else: pass else: seq =", "eq1 = 2 * np.sqrt(np.pi * D * t ** 3) eq2 =", "of colloid breakthrough from endpoint files. Parameters: ---------- :param str filename: <>.endpoint file", "reader @property def breakthrough_curve(self): \"\"\" Property method that performs a dynamic calculation of", "header later! self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader =", "---------- :param str key: valid dictionary key from self.keys :param object ax: matplotlib", "@property def mean(self): \"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def", "pdf): \"\"\" Method to estimate residuals from jury 1991 equation using data Parameters", "upon user supplied bin size. 
Parameters: ---------- :param int nbin: number of time", "plotting kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin)", "self.velocity['velocity']: if lower_v < v <= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.)", "self.__header = [] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not", "('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key))", "hdf.close() return data def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by", "float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else:", "ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\": arr = self.__hdf.get_data(key) if masked:", "'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y',", "self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return data by hdf5 path Parameters:", ":param str filename: output filename (ie. endpoint, timestep, or pathline) \"\"\" dtypes =", "of colloid velocities \"\"\" return (self.stdev / self.mean) * 100 def plot(self, *args,", "the simulation \"\"\" colloid = [] velocity = [] for index, row in", "'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x',", "(ie. 
endpoint, timestep, or pathline) \"\"\" with open(filename) as f: for idx, line", "data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity':", "[i for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve hdf5", "i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to retrieve hdf5 data by", "* 1e-6 y = colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs) def", "hdf5 data keys \"\"\" return [i for i in Hdf5Reader.data_paths] def get_data(self, key):", "\"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return:", "# model plot accepts matplotlib args and kwargs!!! >>> mp.plot('edl_x', cmap='viridis') >>> plt.show()", "not in valid keys. Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key ==", "from vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column", "* R * D * t x = (eq0 / eq1) * np.exp(eq2", "a pandas dataframe Parameters: ---------- :param str filename: output filename (ie. 
endpoint, timestep,", "= tuple([value, velocity[idx]]) self.velocity = arr @property def max(self): \"\"\" :return: maximum colloid", "arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx]", "be supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep =", "based on changing the pdf bin values Parameters: ---------- :param int nbin: number", ":ivar pdf: (np.recarray) colloid probability distribution function \"\"\" def __init__(self, filename, nbin=1000): if", "plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk =", "solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least", "pass else: seq = False strip_idx = None if strip_idx is not None:", "= self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] ==", "\"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean colloid velocity \"\"\" return", "0 self.velocity_factor = 1. 
self.continuous = 0 self.__data_startline = 0 self.__header = []", "0 for index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts)", "mean fluid velocity t: (float) time \"\"\" from scipy import special D =", "(float) mean fluid_velocity t: (float) time \"\"\" D = vars[0] R = vars[1]", "rec['ncol'] == 0: if not seq: strip_idx = idx seq = True else:", "filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.df = reader.df", "= normalize ts = [] ncols = [] lower_nts = 0 max_ts =", "Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get data method", "arr @property def max(self): \"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property", "upper_v in bins: ncol = 0 for v in self.velocity['velocity']: if lower_v <", "key: valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to", "dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]])", "not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied')", "raise KeyError('Dictionary key not in valid keys. Use get_data_by_path') hdf = H.File(self.file_name, 'r')", "to data Returns: ------ :return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data", "self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def reset_pdf(self, nbin,", "else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read the", "return colloid velocity and statistics relating to colloid velocity for a simulation. 
Class", "{'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor':", "= self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args,", "be supplied') self.file_name = hdf5 @property def keys(self): \"\"\" :return: list of valid", "(row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx,", "\\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x',", "img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img ==", "1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts']", "key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()]", "float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args,", "= reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin =", ":param float width: matplotlib bar width. 
:param *args: matplotlib plotting args :param **kwargs:", "which allows the user to use keys to access data Parameters: ---------- :param", "False self.__reader = reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a", "def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve", "self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh", "('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity =", "i in line.split() if i not in ('\\t', '', ' ', '\\n')] else:", "elif key == \"image\": arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr ==", "Returns: ------- :return: scipy least squares dictionary. Answer in dict['x'] \"\"\" # todo:", "self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] // 2 if key == \"<KEY>\":", "quiver plotting :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\"", "filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader =", "bin size. Parameters: ---------- :param int nbin: number of time steps to base", "squares kwargs Returns: ------- :return: scipy least squares dictionary. 
Answer in dict['x'] \"\"\"", "import matplotlib.pyplot as plt import pandas as pd import h5py as H class", "\"\"\" Property method that performs a dynamic calculation of breakthrough curve data \"\"\"", "------ :return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close()", "**kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol']", "'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf", "user supplied bin size. Parameters: ---------- :param int nbin: number of time steps", "= None if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx + 1] time", "(non-dimensional time) Parameters: ---------- :param *args: matplotlib args for 1d plotting :param **kwargs:", "elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x')", "return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting method for 1d colloid-colloid", "mesh = ccx + ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy", "the header later! self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader", "in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t", "\"\"\" :return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\"", "plotting colloids. 
\"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" # todo: create", "'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5):", "== \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center, center:]", "float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def", "= plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk", ":param str hdf5: hdf5 file name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf')", "keys to obtain data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method to", "self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to calculate the mean velocity of", "Method to estimate residuals from vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity,", "reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous # todo: replace this call with", "elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split()", "relating to colloid velocity for a simulation. 
Class needs to be rebuilt to", "\"\"\" Method to plot pdf data with pore volumes (non-dimensional time) Parameters: ----------", "Get data method to view and analyze colloid force arrays Parameters: ---------- :param", "matplotlib plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else:", "(row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr", "= x[center, center:] y = colcol[center, center:] elif key == \"col_col_y\": x =", "\"\"\" # todo: create a function_fmt for axis options mesh = None if", "mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method", "dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y =", "with timeseries and pathline files for a more precise velocity measurement Parameters: ----------", "filename): \"\"\" Method to read endpoint file data from from ascii files for", "by key Parameters: ---------- :param str key: valid model key \"\"\" return self.__hdf5.get_data(key)", "and colloid-colloid forces respectively. 
example import of the Colloid_output.py module is as follows", "self.__reader = reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a probability", "def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader", "np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx)", "+\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()]", "ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df =", "self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol", "return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting", "pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) + 1, nbin): ncol = 0 for", "elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y =", "\"\"\" :return: standard deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self):", "self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a probablity distribution function of colloid", "specific path Parameters: ---------- :param str path: hdf5 directory path to data Returns:", "a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh is not None: return mesh", "a=yy) x = np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img == 1, a=y)", "= mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\" Method", "'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not", "breakthrough data. Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float)", "max_nfev: maximum number of function iterations :param **kwargs: scipy least squares kwargs Returns:", "idx, data in enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df =", "[] for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts =", "data = data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] #", "'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y',", "hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x',", "plot pdf data with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib", "self.file_name = hdf5 @property def keys(self): \"\"\" :return: list of valid hdf5 data", "Parameters: ---------- :param str path: hdf5 directory path to data Returns: ------ :return:", "self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0,", "supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\"", "work with timeseries and pathline files for a more precise velocity measurement Parameters:", "= reader.resolution self.xlen = 
reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol =", "solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least", "'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close()", "colloids. \"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor", "model plot accepts matplotlib args and kwargs!!! >>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\"", "self.__hdf.keys def get_data(self, key): \"\"\" Get data method to view and analyze colloid", "self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'):", "filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution':", "reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None", ":param str filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of", "else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy =", "self.timestep = reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin", "read endpoint file data from from ascii files for LB-Colloids Sets data to", "not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys. 
Use get_data_by_path') hdf", "data Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None,", "from hdf5 file based on key, instead of data path Parameters: ---------- :param", "* D * t ** 3) eq2 = -(R * L - v", "Class to prepare and plot breakthrough curve data from endpoint files. Parameters: ----------", "key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis", "endpoint, timestep, or pathline) \"\"\" with open(filename) as f: t = [] for", "np import matplotlib.pyplot as plt import pandas as pd import h5py as H", "valid keys to obtain data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method", "/\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Convience", "a = self.ncol l = self.ylen * self.resolution v = self.uy pdf, t", "data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] #", "/ self.mean) * 100 def plot(self, *args, **kwargs): \"\"\" Method to plot distribution", "self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol,", "0: if not seq: strip_idx = idx seq = True else: pass else:", "= [] lower_v = self.min - adjuster upper_v = 0 for upper_v in", "'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic':", "outputs from colloid models. Contains a data_paths dictionary which allows the user to", "or as a meshgrid object More sophisticated than standard ModelPlot Parameters: ---------- :param", "= colcol.shape center = shape[0] // 2 if key == \"<KEY>\": x =", "velocity vectors within the system. 
Parameters: ---------- :param int nbin: refinement for quiver", "'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr',", "a more precise velocity measurement Parameters: ---------- :param str filename: endpoint file name", "L, t, v): \"\"\" Equation for Jury 1991 calculation of Dispersivity and Retardation", "= line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip())", "line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor", "def __try_float(val): try: return float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader", "= reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None", "::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1", "self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin)", "<varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf", "'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import", "\"\"\" Method to retrieve hdf5 data by dict. key Parameters: ---------- :param str", "file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution", "into a matplotlib chart. 
Parameters: ---------- :param bool time: if true x-axis is", "Prepares breakthrough data by stripping off trailing zeros. Returns: pdf = (np.array) stripped", "= [] ncol = 0 for index, row in bt_colloids.iterrows(): ncol += 1", "kwargs \"\"\" # todo: create a function_fmt for axis options mesh = None", "in simulation :ivar total_ncol: (int): total number of colloids in simulation \"\"\" def", "upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) #", "filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep", "== 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy", "idx + 1 break else: pass def read_ascii(self, filename): \"\"\" Method to read", "elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx", "\"\"\" Method to select data from hdf5 file based on key, instead of", "to prevent log scale crashing shape = mesh.shape center = shape[0] // 2", "width: matplotlib bar width. :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "hdf5 file must be supplied') self.file_name = hdf5 @property def keys(self): \"\"\" :return:", "= vars[0] R = vars[1] eq0 = (A * L * np.sqrt(R)) eq1", "a dynamic calculation of breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve", "hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close() return data class FileTypeError(Exception): pass", "retrieve hdf5 data by dict. key Parameters: ---------- :param str key: valid dictionary", "available to provide plotting functionality. 
ModelPlot and CCModelPlot are useful for visualizing colloid-surface", "= (eq0 / eq1) * np.exp(eq2 / eq3) x[0] = 0 return x", "0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class", "* self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity',", ":return: scipy least squares dictionary. Answer in dict['x'] \"\"\" # todo: test this", "np.isnan(x[0]): x[0] = 0 return x def __prep_data(self): \"\"\" Prepares breakthrough data by", "**kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts']", "self.timestep = reader.timestep self.continuous = reader.continuous # todo: replace this call with something", "self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity'])", "data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0]", "self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}:", "def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method for 2d representation of", "scipy import special D = vars[0] R = vars[1] eq0 = R *", "object ax: matplotlib axes object (optional) :param *args: matplotlib plotting args :param **kwargs:", "velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int),", "read in HDF5 stored outputs from colloid models. 
Contains a data_paths dictionary which", "'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x':", "solve least sqares for jury 1991. Pulse flux. Parameters: ---------- :param float D:", "retrieve the pore volume calculation conversion for plotting colloids. \"\"\" pv_factor = (abs(self.__reader.uy)", "for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] *", "time) Parameters: ---------- :param *args: matplotlib args for 1d plotting :param **kwargs: matplotlib", "plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys", "* self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] /", "3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) + 1, nbin): ncol", "= float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt", "least squares dictionary. Answer in dict['x'] \"\"\" from scipy.optimize import least_squares l =", "self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr']", "matplotlib args and kwargs!!! 
>>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as", "name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be", "hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\", "to read endpoint file data from from ascii files for LB-Colloids Sets data", "else: pass else: seq = False strip_idx = None if strip_idx is not", "kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin) ncols", "L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D * t **", "scipy least squares dictionary. Answer in dict['x'] \"\"\" from scipy.optimize import least_squares l", "in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method", "np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi * D * t ** 3) eq2", "from breakthrough data. Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v:", "Method to retrieve the pore volume calculation conversion for plotting colloids. 
\"\"\" pv_factor", "arr, *args, **kwargs) elif key == \"image\": arr = self.__hdf.get_data(key) if masked: arr", "allows the user to use keys to access data Parameters: ---------- :param str", "hdf5 @property def keys(self): \"\"\" :return: list of valid hdf5 data keys \"\"\"", "self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0,", "\"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args,", "pathline files for a more precise velocity measurement Parameters: ---------- :param str filename:", "curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l,", "vars[1] eq0 = R * l - v * t eq1 = np.sqrt(4", "== self.__data_startline: self.__header = [i.rstrip() for i in line.split() if i not in", "to reset values based on changing the pdf bin values Parameters: ---------- :param", "if i not in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i", "raise FileTypeError('hdf or hdf5 file must be supplied') self.file_name = hdf5 @property def", "replace this call with something from the header later! 
self.ncol = reader.ncol self.total_ncol", "* self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to", "based on key, instead of data path Parameters: ---------- :param str key: lattice", "dictionary which allows the user to use keys to access data Parameters: ----------", "hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key", "of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float)", "t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor =", "FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self):", "lattice boltzmann data key Returns: ------- :return: data \"\"\" if key in (\"velocity_x\",", "__try_float(val): try: return float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object", "mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs)", ":param str key: valid data key :param object ax: matplotlib axes object (optional)", "# todo: think about this one. Does it belong here? Finish class. Integrate", "pass def read_ascii(self, filename): \"\"\" Method to read endpoint file data from from", "\"\"\" # todo: test this method! look up references for clearer examples! from", "method to calculate pdf by residence time or end time \"\"\" self.bin =", "value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property def max(self):", "are available to provide plotting functionality. 
ModelPlot and CCModelPlot are useful for visualizing", "to estimate residuals from jury 1991 equation using data Parameters vars: (np.array) [dispersivity,", "hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must", "squares dictionary. Answer in dict['x'] \"\"\" # todo: test this method! look up", "= ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous =", "continuous release, 0 means pulse :ivar ncol: (float): number of colloids per release", "iterations :param **kwargs: scipy least squares kwargs Returns: ------- :return: scipy least squares", "ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts')", "**kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous:", "/ self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs)", "if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution)", "bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 for index, row", "1 velocity.append((lower_v + upper_v)/2.) 
ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v + adjuster)", "number of colloids in simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise", "if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh =", "self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,),", "t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t, v,", "colloid-colloid dlvo profiles. Parameters: ---------- :param str key: valid data key :param object", "to plot a histogram of velocities using a bar chart. Parameters: ---------- :param", "shape = mesh.shape center = shape[0] // 2 mesh[center, center] = mesh[center, center", "mesh = self.__hdf5.get_data(key) # find center and set to nearby value to prevent", "normalize: method to calculate pdf by residence time or end time \"\"\" self.bin", "keys to access data Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file name", "method to solve least squares for van genuchten 1986. Miscable displacement. 
Parameters: ----------", "return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self,", "self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0,", "in simulation :ivar pdf: (np.recarray) colloid probability distribution function \"\"\" def __init__(self, filename,", "'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not", "and Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v:", "= data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data", "and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 =", "center:] # * 1e-6 y = colcol[center, center:] plt.plot(x, y * -1, *args,", "ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx,", "x = (eq0 / eq1) * np.exp(eq2 / eq3) x[0] = 0 return", "total_ncol: (int): total number of colloids in simulation :ivar pdf: (np.recarray) colloid probability", "get_data(self, key): \"\"\" Method to retrieve hdf5 data by dict. key Parameters: ----------", "if lower_v < v <= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol)", "in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p", "t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x", "from ascii files for LB-Colloids Sets data to pandas dataframe Parameters: ---------- :param", "outputs and perform post processing. 
Many classes are available to provide plotting functionality.", "('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine',", "to prepare and plot breakthrough curve data from endpoint files. Parameters: ---------- :param", "filename: colloid model output filename (ie. endpoint, timestep, or pathline) \"\"\" with open(filename)", "prepare and plot breakthrough curve data from endpoint files. Parameters: ---------- :param str", "self.ylen * self.resolution v = self.uy t = self.bt['nts'].as_matrix() * self.timestep bt =", "1, a=xx) yy = np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img == 1,", "line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif", "self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def", "lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value", "hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\" Method", "class ColloidVelocity(object): \"\"\" Method to return colloid velocity and statistics relating to colloid", "def stdev(self): \"\"\" :return: standard deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property", "Answer in dict['x'] \"\"\" from scipy.optimize import least_squares l = self.ylen * self.resolution", "'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()]", 
"< row['delta-ts'] <= upper_nts: ncol += 1 else: if lower_nts < row['end-ts'] <=", "self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values", "t, v) def __jury_1991(self, vars, A, L, t, v): \"\"\" Equation for Jury", "Miscable displacement. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be 0", "as plt import pandas as pd import h5py as H class Breakthrough(object): \"\"\"", "to plot distribution of velocities by colloid for array of velocity. Parameters ----------", "False strip_idx = None if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx +", "'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape =", "= True else: pass else: seq = False strip_idx = None if strip_idx", "data by dict. key Parameters: ---------- :param str key: valid dictionary key from", "axes object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs", "magnitude and direction of velocity vectors within the system. Parameters: ---------- :param int", "Returns: ------- :return: data <varies> \"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary", "for 2d representation of colloid-colloid dlvo profiles. Parameters: ---------- :param str key: valid", "= reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf =", "of colloid-colloid dlvo profiles. 
Parameters: ---------- :param str key: valid data key :param", "+ adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think about this", "else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i not in ('\\t', '', '", "supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep", "pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar", "mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as np import matplotlib.pyplot as plt", "**kwargs) def __jury_residuals(self, vars, A, L, t, v, pdf): \"\"\" Method to estimate", "\"\"\" :return: list of valid hdf5 data keys \"\"\" return [i for i", "def breakthrough_curve(self): \"\"\" Property method that performs a dynamic calculation of breakthrough curve", "fluid_velocity t: (float) time pdf: pd.dataframe c/co of colloid pdf \"\"\" return pdf", "with something from the header later! 
self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve", "1e-6 y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:]", "'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor':", "else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\" Method to", "---------- :param str hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y':", "int(max_ts) + 1, nbin): ncol = 0 for index, row in pdf_colloids.iterrows(): if", ":param str hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': None,", "row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else:", "Parameters: ---------- :param str filename: colloid model output filename (ie. 
endpoint, timestep, or", "least_squares a = self.ncol l = self.ylen * self.resolution v = self.uy pdf,", "(np.array) breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self,", "= hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()]", "dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def", "plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create a quiver plot", "+= 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) &", "Plotting method for 1d colloid-colloid dlvo profiles Parameters: ---------- :param str key: valid", "\"\"\" Method to estimate residuals from jury 1991 equation using data Parameters vars:", ":param int max_nfev: maximum number of function iterations :param **kwargs: scipy least squares", "more precise velocity measurement Parameters: ---------- :param str filename: endpoint file name \"\"\"", "filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename)", "self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx,", "line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif", "matplotlib keyword arguments for 1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] *", "self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class to retrieve", "self.__hdf5.get_data('distance_y') x = x.T[center, 
center:] y = colcol.T[center, center:] elif key == \"col_col_fine_x\":", "= hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x', 'lvdw_y',", "(int): total number of colloids in simulation :ivar pdf: (np.recarray) colloid probability distribution", "self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args,", "[dispersivity, retardation] A: ncol l: (float) ylen v: (float) mean fluid_velocity t: (float)", "self.uy t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 =", "ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] /", "+\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()]", "self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs)", "guess. Cannot be 0 :param float R: Retardation initial guess. 
Cannot be 0", "shape[0] // 2 mesh[center, center] = mesh[center, center + 1] xx, yy =", "*args, **kwargs) elif key == \"image\": arr = self.__hdf.get_data(key) if masked: arr =", "Parameters: ---------- :param str key: valid dictionary key from self.keys :param object ax:", "self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else:", "def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create a quiver", "= np.linspace(self.min - adjuster, self.max, nbin) ncols = [] velocity = [] lower_v", "plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values", "t, v, pdf): \"\"\" Method to estimate residuals from jury 1991 equation using", "hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key", "def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get data method to view", "mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1))", "\"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y',", "row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr =", "'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x':", "line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif", 
"self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else:", "vars[0] R = vars[1] eq0 = (A * L * np.sqrt(R)) eq1 =", "float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t =", "---------- :param str key: valid data key :param *args: matplotlib plotting args :param", "'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y':", "(np.array) [dispersivity, retardation] A: ncol l: (float) ylen v: (float) mean fluid_velocity t:", "nbin: refinement for quiver plotting :param *args: matplotlib plotting args :param **kwargs: matplotlib", "trailing zeros. Returns: pdf = (np.array) stripped pdf t = (np.array) times \"\"\"", "return data by hdf5 path Parameters: ---------- :param str path: valid HDF5 data", "of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation", "0 ncol_per_release = [] for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] -", "by colloid for array of velocity. Parameters ---------- :param *args: matplotlib plotting args", "of velocity. 
Parameters ---------- :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "' ', '\\n')]) temp = np.array(t).T temp = {self.__header[idx]: data for idx, data", "time, false is nts :param *args: matplotlib args for 1d charts :param **kwargs:", "def plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf data with pore volumes", "Class to anaylze LB fluid/solid properties Parameters: ---------- :param str hdf: hdf5 output", ">= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol':", "= np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key == 'col_col_fine': ccx =", "get_data_by_path') hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif", "0 self.ylen = 0 self.ux = 0 self.uy = 0 self.velocity_factor = 1.", "/ self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to", "__reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset", "self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol", "def __jury_1991(self, vars, A, L, t, v): \"\"\" Equation for Jury 1991 calculation", "= 0 self.ylen = 0 self.ux = 0 self.uy = 0 self.velocity_factor =", "*args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid", "*args: matplotlib args for 1d plotting :param **kwargs: matplotlib kwargs for 1d plotting", "---------- :param str filename: <>.endpoint file name :param int nbin: number of bins", "= row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol +=", "c/co of colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A, L, t, v)", "str path: hdf5 directory path to data Returns: ------- :return: data <varies> \"\"\"", "velocity t: (float) time bt: (np.array) breakthrough curve 
\"\"\" return bt - self.__van_genuchten_1986(vars,", "supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to", "upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts',", "x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image') xx =", "pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1] else: pdf =", "0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\"", "(bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve =", "a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] //", "enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t =", "data by hdf5 path Parameters: ---------- :param str path: valid HDF5 data path", "masked=False, *args, **kwargs): \"\"\" Method to create a quiver plot to display the", "Scipy optimize method to solve least squares for van genuchten 1986. 
Miscable displacement.", "array plotting using Hdf5Reader keys Parameters: ---------- :param str key: valid dictionary key", "to calculate pdf by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf", "Parameters: ---------- :param str key: valid data key :param *args: matplotlib plotting args", "'vmax' in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()),", "(float) time pdf: pd.dataframe c/co of colloid pdf \"\"\" return pdf - self.__jury_1991(vars,", "kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p =", "= False bt = False for idx, rec in enumerate(self.pdf): if not bt:", "x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6 y = colcol[center,", "= 0 self.__header = [] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise", "elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split()", "\"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied')", "a simulation. Class needs to be rebuilt to work with timeseries and pathline", "---------- :param str filename: colloid model output filename (ie. endpoint, timestep, or pathline)", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm if", "nbin, normalize=False): \"\"\" Method to generate a probability distribution function based upon user", "None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf =", "tolerance for solution :param int max_nfev: maximum number of function iterations :param **kwargs:", "scipy least squares dictionary. 
Answer in dict['x'] \"\"\" # todo: test this method!", "\"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l, v,", "Integrate into LB class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties Parameters:", "\"\"\" :return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\"", "self.xlen = 0 self.ylen = 0 self.ux = 0 self.uy = 0 self.velocity_factor", "valid data key :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs", "float R: Retardation initial guess. Cannot be 0 :param float ftol: scipy function", "return bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l, v, t):", ":param str filename: ascii output file name from colloid model :param int nbin:", "scale crashing shape = mesh.shape center = shape[0] // 2 mesh[center, center] =", "ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. ax.plot([center], [center], 'ko') return p class", "for idx, data in enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df", "is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center =", "\"\"\" Class to plot a probablity distribution function of colloid breakthrough from endpoint", "x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6 y = colcol[center,", "minimize, least_squares a = self.ncol l = self.ylen * self.resolution v = self.uy", "= Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to return valid keys to", "**kwargs): \"\"\" Method to plot pdf data with pore volumes (non-dimensional time) Parameters:", "'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) if key", "\"\"\" return [i for i in Hdf5Reader.data_paths] def get_data(self, key): \"\"\" Method to", "bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = 
bt_colloids.sort_values('end-ts') ncols = [] nts =", "colloid in the simulation \"\"\" colloid = [] velocity = [] for index,", "set to nearby value to prevent log scale crashing shape = mesh.shape center", "velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0)", "name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf", "stored outputs from colloid models. Contains a data_paths dictionary which allows the user", "least sqares for jury 1991. Pulse flux. Parameters: ---------- :param float D: Diffusivity", "= self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf,", "ax is None: ax = plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x',", "t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux =", "and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 =", "elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data =", "(float): model timestep :ivar continuous: (int): interval of continuous release, 0 means pulse", "open(filename) as f: t = [] for idx, line in enumerate(f): if idx", "stripping off trailing zeros. 
Returns: pdf = (np.array) stripped pdf t = (np.array)", "a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\")", "data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous: bt_colloids", "residuals from jury 1991 equation using data Parameters vars: (np.array) [dispersivity, retardation] A:", "CDE equation Parameters: ---------- :param str filename: ascii output file name from colloid", "breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars,", ":param int nbin: number of specific bins for plotting :param float width: matplotlib", "'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine':", "matplotlib plotting kwargs \"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise", "for 1d colloid-colloid dlvo profiles Parameters: ---------- :param str key: valid data key", "---------- :param str filename: ascii output file name from colloid model :param int", "analyze colloid force arrays Parameters: ---------- :param str key: valid dictionary key from", "x def __prep_data(self): \"\"\" Prepares breakthrough data by stripping off trailing zeros. Returns:", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm", "center = mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\"", "colcol.T[center, center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:]", "return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion", "plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs)", "Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys. Use get_data_by_path') hdf = H.File(self.file_name,", "\"\"\" Method to read the header from ascii output files for LB-Colloids Parameters:", "self.resolution v = self.uy pdf, t = self.__prep_data() x0 = np.array([D, R]) return", "data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data =", "= shape[0] // 2 mesh[center, center] = mesh[center, center + 1] xx, yy", "timesteps to bin a pdf for calculation :param bool normalize: flag to calculate", "y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0,", ":return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs):", "/ float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol),", "key): \"\"\" Method to return data by key Parameters: ---------- :param str key:", "FileTypeError('hdf or hdf5 file must be supplied') self.file_name = hdf5 @property def keys(self):", "Parameters: ---------- :param str key: valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self,", "masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr =", "def get_data(self, 
key): \"\"\" Method to retrieve hdf5 data by dict. key Parameters:", "None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None}", "key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] // 2 if", "and perform post processing. Many classes are available to provide plotting functionality. ModelPlot", "for data analysis. Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" def", "self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\"", "Parameters: ---------- :param str key: valid dictionary key from self.keys Returns: ------- :return:", "file based on key, instead of data path Parameters: ---------- :param str key:", "'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x',", "ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh,", "* special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x def __prep_data(self): \"\"\" Prepares", "shape[0] // 2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center,", "from from ascii files for LB-Colloids Sets data to pandas dataframe Parameters: ----------", "pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) + 1, nbin): ncol =", "def get_data(self, key): \"\"\" Get data method to view and analyze colloid force", "column length v: (float) mean fluid velocity t: (float) time \"\"\" from scipy", "0 self.xlen = 0 self.ylen = 0 self.ux = 0 self.uy = 0", "D * R * t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0]", "hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': 
None, 'lb_velocity_x': None,", "self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'):", "from ascii output files for LB-Colloids Parameters: ---------- :param str filename: colloid model", "lower_nts < row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts", "= [i.rstrip() for i in line.split() if i not in ('\\t', '', '", "data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\" Method to retrieve", "colloid breakthrough from endpoint files. Parameters: ---------- :param str filename: <>.endpoint file name", "velocity = [] for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution)", "if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else:", "[] nts = [] ncol = 0 for index, row in bt_colloids.iterrows(): ncol", "self.timestep = reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen self.df", "self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to", "self.max, nbin) ncols = [] velocity = [] lower_v = self.min - adjuster", "'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if", ":return: list of valid hdf5 data keys \"\"\" return [i for i in", "1e-6 y = colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs) def plot_mesh(self,", "distribution function of colloid breakthrough from endpoint files. Parameters: ---------- :param str filename:", "**kwargs): \"\"\" Method to plot data into a matplotlib chart. 
Parameters: ---------- :param", "pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol += 1 else:", "in simulation :ivar total_ncol: (int): total number of colloids in simulation :ivar pdf:", "* self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] /", "index, row in pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol", "data Returns: ------ :return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data =", "x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y')", "equation using data Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen", "\"\"\" Scipy optimize method to solve least sqares for jury 1991. Pulse flux.", "for calculation :param bool normalize: flag to calculate pdf by residence time or", "data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0]", "args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self,", "Retardation initial guess. Cannot be 0 :param float ftol: scipy function tolerance for", "1. 
self.continuous = 0 self.__data_startline = 0 self.__header = [] if filename.split('.')[-1] not", "else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid',", "\"image\": arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr,", "ccx + ccy else: mesh = self.__hdf5.get_data(key) # find center and set to", "**kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args,", "this one. Does it belong here? Finish class. Integrate into LB class LBOutput(object):", "max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols,", "= Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\"", "idx, line in enumerate(f): if idx < self.__data_startline: pass elif idx == self.__data_startline:", "+\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key ==", "# hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine': data", "velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think about", "* self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor", "def keys(self): \"\"\" Property method to return valid keys to obtain data \"\"\"", "l, v, t, bt): \"\"\" Method to estimate residuals from vanGenuchten and Winerega", "stdev(self): \"\"\" :return: standard deviation of colloid velocities \"\"\" return 
np.std(self.velocity['velocity']) @property def", "to solve least sqares for jury 1991. Pulse flux. Parameters: ---------- :param float", "ax = plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'):", "if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols =", ":param str filename: colloid model output filename (ie. endpoint, timestep, or pathline) \"\"\"", "\"\"\" Method to retrieve the pore volume calculation conversion for plotting colloids. \"\"\"", "(optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" from", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" # todo:", "charts :param **kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time: if", "[] ncol = 0 ncol_per_release = [] for index, row in bt_colloids.iterrows(): lower_ts", "A: ncol l: (float) ylen v: (float) mean fluid_velocity t: (float) time \"\"\"", "str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image',", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x", "mesh.min() vmax = mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax'", "'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter':", "pdf for calculation \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint", "'continuous': np.int} def __init__(self, filename): self.timestep = 0 self.ncol = 0 self.resolution =", "*args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf", "np.exp(eq2 / eq3) x[0] = 0 return x 
def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10,", "hdf5: hdf5 file name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not", "and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf =", "for array of velocity. Parameters ---------- :param *args: matplotlib plotting args :param **kwargs:", "upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v = upper_v - adjuster", "**kwargs) if mesh is not None: return mesh else: return ax def plot_velocity_magnitude(self,", "= int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t", "and pathline files for a more precise velocity measurement Parameters: ---------- :param str", "matplotlib.colors import LogNorm if ax is None: ax = plt.gca() if key not", "np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx] = tuple([value,", "Answer in dict['x'] \"\"\" # todo: test this method! 
look up references for", "arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'):", "path: hdf5 directory path to data Returns: ------- :return: data <varies> \"\"\" return", "= (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True,", "= 0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin) ncols = [] velocity", "= 0 self.resolution = 0 self.xlen = 0 self.ylen = 0 self.ux =", "= reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol =", "x-axis is time, false is nts :param *args: matplotlib args for 1d charts", "path Parameters: ---------- :param str key: lattice boltzmann data key Returns: ------- :return:", "least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l,", ">= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol))", "LB-Colloids Parameters: ---------- :param str filename: colloid model output filename (ie. endpoint, timestep,", "self.__data_startline = idx + 1 break else: pass def read_ascii(self, filename): \"\"\" Method", "(self.stdev / self.mean) * 100 def plot(self, *args, **kwargs): \"\"\" Method to plot", "Diffusivity initial guess. Cannot be 0 :param float R: Retardation initial guess. Cannot", "User method to reset values based on changing the pdf bin values Parameters:", "refinement for quiver plotting :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "= mesh.min() vmax = mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if", "colloid for array of velocity. 
Parameters ---------- :param *args: matplotlib plotting args :param", ":param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm if ax is", "\"\"\" self.bin = nbin self.__normalize = normalize ts = [] ncols = []", "'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def", "a=x) y = np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin],", "bt): \"\"\" Method to estimate residuals from vanGenuchten and Winerega 1986 Parameters: vars:", "line.split() if i not in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for", "Colloid force arrays and plot for data analysis. Parameters: ---------- :param str hdf5:", "initial guess. Cannot be 0 :param float ftol: scipy function tolerance for solution", "self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property method that performs a dynamic", "plt >>> >>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model", "np.int, 'continuous': np.int} def __init__(self, filename): self.timestep = 0 self.ncol = 0 self.resolution", "self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property", "'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine':", "hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y': data =", "key Parameters: ---------- :param str key: valid model key \"\"\" return self.__hdf5.get_data(key) def", "the pore volume calculation conversion for plotting colloids. 
\"\"\" pv_factor = (abs(self.__reader.uy) *", "to retrieve hdf5 data by specific hdf5 path Parameters: ---------- :param str path:", "self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'):", "import of the Colloid_output.py module is as follows >>> from lb_colloids import ColloidOutput", "'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key", "the Colloid_output.py module is as follows >>> from lb_colloids import ColloidOutput >>> import", "'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y':", "ax=None, *args, **kwargs): \"\"\" Plotting method for 2d representation of colloid-colloid dlvo profiles.", "Method to read the header from ascii output files for LB-Colloids Parameters: ----------", "data into a matplotlib chart. Parameters: ---------- :param bool time: if true x-axis", "nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin,", "Method to retrieve hdf5 data by dict. key Parameters: ---------- :param str key:", "(bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >=", "matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm if ax is None: ax", "<varies> \"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close() return data class", "probability distribution function based upon user supplied bin size. 
Parameters: ---------- :param int", "number of bins for pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe", "from scipy.optimize import leastsq, minimize, least_squares a = self.ncol l = self.ylen *", "factor else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to read", "'\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i not in ('\\t', '',", "= ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args and kwargs!!! >>> mp.plot('edl_x',", "**kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01,", "hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5", "ascii output file name from colloid model :param int nbin: number of timesteps", "total number of colloids in simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'):", "fluid velocity t: (float) time bt: (np.array) breakthrough curve \"\"\" return bt -", "1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] /", "= self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to read in text based", "False bt = False for idx, rec in enumerate(self.pdf): if not bt: if", "*args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to plot", "optimize method to solve least squares for van genuchten 1986. Miscable displacement. 
Parameters:", "'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number':", "D = vars[0] R = vars[1] eq0 = R * l - v", "data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5", "self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center, center:] elif key == \"col_col_y\":", ":param float D: Diffusivity initial guess. Cannot be 0 :param float R: Retardation", "[] ncol = 0 for index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol))", "hdf5: LB-Colloid hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x':", "files <endpoint, timestep, pathline> to a pandas dataframe Parameters: ---------- :param str filename:", "'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x':", "self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts", "index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df =", "false is nts :param *args: matplotlib args for 1d charts :param **kwargs: matplotlib", "Method to plot data into a matplotlib chart. 
Parameters: ---------- :param bool time:", "Jury 1991 calculation of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation] A:", ":param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y =", "center:] y = colcol[center, center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x", "\"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6 y =", ":param object ax: matplotlib axes object (optional) :param *args: matplotlib plotting args :param", "open(filename) as f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split()", "ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float),", "FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution =", "{self.__header[idx]: data for idx, data in enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header,", "estimate residuals from jury 1991 equation using data Parameters vars: (np.array) [dispersivity, retardation]", "is as follows >>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt", "for pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data", "is not a valid key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy", "0 self.ncol = 0 self.resolution = 0 self.xlen = 0 self.ylen = 0", "= [] for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts", "endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint file Attributes: ---------- :ivar df:", "must be supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep", "= self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\":", "in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args,", "simulation \"\"\" colloid = [] velocity = [] for index, row in self.df.iterrows():", "**kwargs: scipy least squares kwargs Returns: ------- :return: scipy least squares dictionary. Answer", "__init__(self, filename): self.timestep = 0 self.ncol = 0 self.resolution = 0 self.xlen =", "= np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if", "\"\"\" from matplotlib.colors import LogNorm if ax is None: ax = plt.gca() if", "None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf')", "idx < self.__data_startline: pass elif idx == self.__data_startline: self.__header = [i.rstrip() for i", "= reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time =", "None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to calculate the mean velocity", "plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method", "idx, rec in enumerate(self.pdf): if not bt: if rec['ncol'] != 0: bt =", "hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf", "velocities by colloid for array of velocity. Parameters ---------- :param *args: matplotlib plotting", "parameters for field scale model parameterization Class needs to be re-named and updated", "/ 2. 
ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\" Method to return", "// 2 mesh[center, center] = mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0,", "self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous #", ":param str key: lattice boltzmann data key Returns: ------- :return: data \"\"\" if", "'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def", "# todo: test this method! look up references for clearer examples! from scipy.optimize", "number of colloids in simulation :ivar pdf: (np.recarray) colloid probability distribution function \"\"\"", "probability distribution function \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint", "args :param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y", "return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min()", "plot a histogram of velocities using a bar chart. Parameters: ---------- :param int", "**kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough data", "y = np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin,", "for plotting :param float width: matplotlib bar width. :param *args: matplotlib plotting args", "'', ' ', '\\n')]) temp = np.array(t).T temp = {self.__header[idx]: data for idx,", "for 1d charts :param **kwargs: matplotlib keyword arguments for 1d charts \"\"\" if", "str filename: colloid model output filename (ie. 
endpoint, timestep, or pathline) \"\"\" with", "self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to calculate", "self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to", "float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t =", "path to data Returns: ------ :return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r')", "nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename)", "plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{}", "of specific bins for plotting :param float width: matplotlib bar width. :param *args:", "simulation outputs and perform post processing. Many classes are available to provide plotting", "ncol = 0 for v in self.velocity['velocity']: if lower_v < v <= upper_v:", "\"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid velocity \"\"\" return", "class Breakthrough(object): \"\"\" Class to prepare and plot breakthrough curve data from endpoint", "- self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars, A, L, t, v):", "def read_ascii(self, filename): \"\"\" Method to read endpoint file data from from ascii", "for i in line.split() if i not in ('\\t', '', ' ', '\\n')])", "reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol = reader.df.shape[0]", "D * t x = (eq0 / eq1) * np.exp(eq2 / eq3) x[0]", "plotting kwargs \"\"\" from matplotlib.colors import LogNorm if ax is None: ax =", "def keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self,", "*args, **kwargs): \"\"\" 
Plotting method for 1d colloid-colloid dlvo profiles Parameters: ---------- :param", "**kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else:", "'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise", "\"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select data from hdf5", "to retrieve Colloid force arrays and plot for data analysis. Parameters: ---------- :param", "str key: valid dictionary key from self.keys :param object ax: matplotlib pyplot axes", "number of time steps to base bin on :param bool normalize: method to", "*args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1])", "# * 1e-6 y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x =", "hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5", "'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve", "\"\"\" return (self.stdev / self.mean) * 100 def plot(self, *args, **kwargs): \"\"\" Method", "in text based output files <endpoint, timestep, pathline> to a pandas dataframe Parameters:", "else: pass def read_ascii(self, filename): \"\"\" Method to read endpoint file data from", "colloid model output filename (ie. 
endpoint, timestep, or pathline) \"\"\" with open(filename) as", "norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0,", "Method to generate a probability distribution function based upon user supplied bin size.", "leastsq, minimize, least_squares a = self.ncol l = self.ylen * self.resolution v =", "data def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific hdf5", "keyword arguments for 1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep,", "= np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img == 1, a=y) Q =", "t) ** 2 eq3 = 4 * R * D * t x", "\"\"\" Plotting method for 2d representation of colloid-colloid dlvo profiles. Parameters: ---------- :param", ":param str filename: endpoint file name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"):", "@property def max(self): \"\"\" :return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def", "calculate Dispersivity and Retardation from breakthrough data. Parameters: vars: (np.array) [dispersivity, retardation] x:", "**kwargs) # todo: think about this one. Does it belong here? Finish class.", "<>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar", "velocity for a simulation. 
Class needs to be rebuilt to work with timeseries", "False for idx, rec in enumerate(self.pdf): if not bt: if rec['ncol'] != 0:", "arr def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion for", "adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think about this one.", "reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename,", "ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t, v, pdf): \"\"\" Method", "plt.show() \"\"\" import numpy as np import matplotlib.pyplot as plt import pandas as", "/ eq3) x[0] = 0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000,", "self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific path", "import pandas as pd import h5py as H class Breakthrough(object): \"\"\" Class to", "ColloidVelocity(object): \"\"\" Method to return colloid velocity and statistics relating to colloid velocity", "of the Colloid_output.py module is as follows >>> from lb_colloids import ColloidOutput >>>", "velocity t: (float) time \"\"\" from scipy import special D = vars[0] R", "x[0] = 0 return x def __prep_data(self): \"\"\" Prepares breakthrough data by stripping", "reset values based on changing the pdf bin values Parameters: ---------- :param int", "and Winerega 1986 to calculate Dispersivity and Retardation from breakthrough data. 
Parameters: vars:", "2 mesh[center, center] = mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1),", "not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied')", "reader = ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous", "probablity distribution function of colloid breakthrough from endpoint files. Parameters: ---------- :param str", "else: if rec['ncol'] == 0: if not seq: strip_idx = idx seq =", "arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property def max(self): \"\"\" :return: maximum", "shape = colcol.shape center = shape[0] // 2 if key == \"<KEY>\": x", "data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y':", "= reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol =", "jury 1991. Pulse flux. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot", "= reader.timestep self.continuous = reader.continuous # todo: replace this call with something from", "= 0 return x def __prep_data(self): \"\"\" Prepares breakthrough data by stripping off", "nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to plot a histogram of velocities", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster =", "= pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def", "in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key)) elif key", "mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\" Method to", "[] for index, row in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts']", "v: (float) mean fluid velocity t: (float) time \"\"\" from scipy import special", "center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y", "elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis,", "by stripping off trailing zeros. Returns: pdf = (np.array) stripped pdf t =", "np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img", "bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts'])", "mean fluid_velocity t: (float) time \"\"\" D = vars[0] R = vars[1] eq0", "return (self.stdev / self.mean) * 100 def plot(self, *args, **kwargs): \"\"\" Method to", "of valid hdf5 data keys \"\"\" return [i for i in Hdf5Reader.data_paths] def", "(pandas DataFrame): dataframe of endpoint data :ivar resolution: (float): model resolution :ivar timestep:", "bt = False for idx, rec in enumerate(self.pdf): if not bt: if rec['ncol']", "np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float,", "factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data", "on :param bool normalize: method to calculate pdf by residence time or end", "return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean()", "self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Convience method to plot", 
"timeseries and pathline files for a more precise velocity measurement Parameters: ---------- :param", "to use keys to access data Parameters: ---------- :param str hdf5: LB-Colloid hdf5", "if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx", "def read_header(self, filename): \"\"\" Method to read the header from ascii output files", "'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x':", "from colloid models. Contains a data_paths dictionary which allows the user to use", "= Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get data", "'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x',", "t: (float) time \"\"\" D = vars[0] R = vars[1] eq0 = (A", "ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol", "= self.read_ascii(filename) def read_header(self, filename): \"\"\" Method to read the header from ascii", "pass elif idx == self.__data_startline: self.__header = [i.rstrip() for i in line.split() if", "= tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\" Method to retrieve the", "ncols = [] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] ==", "'gravity', 'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key)) elif key in ('dlvo_fine',", "plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and plot data", "and set to nearby value to prevent log scale 
crashing shape = mesh.shape", "self.mean) * 100 def plot(self, *args, **kwargs): \"\"\" Method to plot distribution of", "= ccx + ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy =", "('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs)", "= line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip())", "data = data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x',", "distribution of velocities by colloid for array of velocity. Parameters ---------- :param *args:", "\"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data", "= reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf =", "= line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx + 1 break", "self.resolution = reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy", "= plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y',", "* pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a probablity distribution", "time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000,", "'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'):", "output filename (ie. 
endpoint, timestep, or pathline) \"\"\" with open(filename) as f: for", "self.__normalize = False self.__reader = reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to", "prevent log scale crashing shape = mesh.shape center = shape[0] // 2 mesh[center,", "keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key):", "ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think about this one. Does", "bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method", "in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) *", "method to plot data into a matplotlib chart. Parameters: ---------- :param bool time:", "**kwargs): \"\"\" Plotting method for 1d colloid-colloid dlvo profiles Parameters: ---------- :param str", "- adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs) # todo:", "'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) if", "create a quiver plot to display the magnitude and direction of velocity vectors", "colloid force arrays Parameters: ---------- :param str key: valid dictionary key from self.keys", "elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split()", "directory path to data Returns: ------ :return: data <varies> \"\"\" hdf = H.File(self.file_name,", "0 return x def __prep_data(self): \"\"\" Prepares breakthrough data by stripping off trailing", "t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy =", "pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve 
def pore_volume_conversion(self):", "\"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of variance of colloid", "arguments for 1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol']", "test this method! look up references for clearer examples! from scipy.optimize import leastsq,", "= None if ax is None: ax = plt.gca() if key in ('lvdw_x',", "return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return data by hdf5 path", "dlvo profiles. Parameters: ---------- :param str key: valid data key :param object ax:", "meshgrid object More sophisticated than standard ModelPlot Parameters: ---------- :param str hdf5: hdf5", "for upper_nts in range(0, int(max_ts) + 1, nbin): ncol = 0 for index,", "'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or", "def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader", "self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method", "elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ #", "not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.df =", "kwargs Returns: ------- :return: scipy least squares dictionary. Answer in dict['x'] \"\"\" from", "by dict. 
key Parameters: ---------- :param str key: valid dictionary key from self.keys", "Method to retrieve hdf5 data by specific path Parameters: ---------- :param str path:", "def cv(self): \"\"\" :return: coeficient of variance of colloid velocities \"\"\" return (self.stdev", "not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\"", "'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine':", "idx == self.__data_startline: self.__header = [i.rstrip() for i in line.split() if i not", "l: (float) ylen v: (float) mean fluid_velocity t: (float) time pdf: pd.dataframe c/co", "xx, yy = np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img", "colloid model :param int nbin: number of timesteps to bin a pdf for", "**kwargs): \"\"\" Method to plot distribution of velocities by colloid for array of", "df: (pandas DataFrame): dataframe of endpoint data :ivar resolution: (float): model resolution :ivar", "pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] == 3]", "__prep_data(self): \"\"\" Prepares breakthrough data by stripping off trailing zeros. 
Returns: pdf =", "np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin,", "plot(self, key, *args, **kwargs): \"\"\" Plotting method for 1d colloid-colloid dlvo profiles Parameters:", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional:", "if true x-axis is time, false is nts :param *args: matplotlib args for", "**kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts']", "if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx", "::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q,", "data by specific hdf5 path Parameters: ---------- :param str path: hdf5 directory path", "must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann", "velocities using a bar chart. Parameters: ---------- :param int nbin: number of specific", "vars[1] eq0 = (A * L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi", "in bins: ncol = 0 for v in self.velocity['velocity']: if lower_v < v", "a pdf for calculation :param bool normalize: flag to calculate pdf by residence", "x[center, center:] # * 1e-6 y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y')", "'ko') return p class ColloidVelocity(object): \"\"\" Method to return colloid velocity and statistics", "= [] ncol = 0 ncol_per_release = [] for index, row in bt_colloids.iterrows():", "direction of velocity vectors within the system. 
Parameters: ---------- :param int nbin: refinement", "= self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method", "Parameters: ---------- :param str hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x': None,", "data for idx, data in enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1)", "= float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t", "colloids in simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file", "data from endpoint files. Parameters: ---------- :param str filename: <>.endpoint file Attributes: ----------", "dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create a quiver plot to display", "measurement Parameters: ---------- :param str filename: endpoint file name \"\"\" def __init__(self, filename):", "file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method", "colloids in simulation :ivar pdf: (np.recarray) colloid probability distribution function \"\"\" def __init__(self,", "__jury_residuals(self, vars, A, L, t, v, pdf): \"\"\" Method to estimate residuals from", "visualizing colloid-surface forces and colloid-colloid forces respectively. example import of the Colloid_output.py module", "profiles. 
Parameters: ---------- :param str key: valid data key :param object ax: matplotlib", "matplotlib keyword arguments for 1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] *", "filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename)", "matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] *", "bins = np.linspace(self.min - adjuster, self.max, nbin) ncols = [] velocity = []", "/ self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt),", "matplotlib.pyplot as plt import pandas as pd import h5py as H class Breakthrough(object):", "reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts'])", "and plot breakthrough curve data from endpoint files. Parameters: ---------- :param str filename:", ":ivar total_ncol: (int): total number of colloids in simulation \"\"\" def __init__(self, filename):", "ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args and kwargs!!! 
>>> mp.plot('edl_x', cmap='viridis')", "timestep, or pathline) \"\"\" with open(filename) as f: for idx, line in enumerate(f):", "== 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy", "ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids", "== 1, a=x) y = np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin],", "else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to read in", "None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y',", "plot a probablity distribution function of colloid breakthrough from endpoint files. Parameters: ----------", "a probability distribution function based upon user supplied bin size. Parameters: ---------- :param", ":param **kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min -", "post processing. Many classes are available to provide plotting functionality. 
ModelPlot and CCModelPlot", "is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts')", "from scipy import special D = vars[0] R = vars[1] eq0 = R", "---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar resolution: (float): model", "view and analyze colloid force arrays Parameters: ---------- :param str key: valid dictionary", "self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method", "is None: ax = plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x',", "matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor *", "models. Contains a data_paths dictionary which allows the user to use keys to", "reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol)", "a=xx) yy = np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img == 1, a=x)", "'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf", "module contains classes to read LB Colloids simulation outputs and perform post processing.", "pdf \"\"\" return pdf - self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars,", "kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p =", "v <= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v = upper_v", "= x[center, center:] # * 1e-6 y = colcol[center, center:] else: x =", "LB Colloids simulation outputs and perform post processing. Many classes are available to", "special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x def __prep_data(self): \"\"\" Prepares breakthrough", "= nbin self.__normalize = normalize ts = [] ncols = [] lower_nts =", "and Retardation from breakthrough data. 
Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column", "return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method to plot data into", "get_data(self, key): \"\"\" Method to return data by key Parameters: ---------- :param str", "if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data =", "*args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if", "* 1e-6 y = colcol[center, center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center,", "self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion for", "ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <=", "or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01,", "cmap='viridis') >>> plt.show() \"\"\" import numpy as np import matplotlib.pyplot as plt import", "'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key)", "if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] *", "L, t, v) def __jury_1991(self, vars, A, L, t, v): \"\"\" Equation for", "= self.__hdf5.get_data('distance_y') x = x.T[center, center:] y = colcol.T[center, center:] elif key ==", "= self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6 y = colcol[center, center:]", "'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = 
np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else:", "bt - self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\"", "np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation of colloid velocities \"\"\" return", "to a pandas dataframe Parameters: ---------- :param str filename: output filename (ie. endpoint,", "'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity',", "'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x':", "v = self.uy pdf, t = self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals,", "\"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return data class", "+= 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve", "'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5):", "hdf5 directory path to data Returns: ------ :return: data <varies> \"\"\" hdf =", "self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting using", "vars, l, v, t, bt): \"\"\" Method to estimate residuals from vanGenuchten and", "read_header(self, filename): \"\"\" Method to read the header from ascii output files for", "'distance_array', 
'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return", "that performs a dynamic calculation of breakthrough curve data \"\"\" max_ts = self.df['nts'].max()", "not bt: if rec['ncol'] != 0: bt = True else: pass else: if", "object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\"", "float('nan') class Hdf5Reader(object): \"\"\" Reader object to read in HDF5 stored outputs from", "**kwargs): \"\"\" Scipy optimize method to solve least squares for van genuchten 1986.", "one. Does it belong here? Finish class. Integrate into LB class LBOutput(object): \"\"\"", "= line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip())", "np.float)]) for idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr", "mesh.shape center = shape[0] // 2 mesh[center, center] = mesh[center, center + 1]", "# hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y': data", "vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]])", "y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy", "plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object):", "'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y',", "df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod def __try_float(val): try:", "None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def 
reset_pdf(self, nbin, normalize=False): \"\"\"", "be 0 :param float ftol: scipy function tolerance for solution :param int max_nfev:", "2. ax.plot([center], [center], 'ko') return p class ColloidVelocity(object): \"\"\" Method to return colloid", "ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for", "bin values Parameters: ---------- :param int nbin: number of timesteps to bin a", ":param int nbin: refinement for quiver plotting :param *args: matplotlib plotting args :param", "key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method to return data by", "np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) /", "R * l - v * t eq1 = np.sqrt(4 * D *", "(float) mean fluid_velocity t: (float) time pdf: pd.dataframe c/co of colloid pdf \"\"\"", "/ eq1) * np.exp(eq2 / eq3) x[0] = 0 return x def solve_van_genuchten_1986(self,", "return x def __prep_data(self): \"\"\" Prepares breakthrough data by stripping off trailing zeros.", "useful for visualizing colloid-surface forces and colloid-colloid forces respectively. example import of the", "seq = False bt = False for idx, rec in enumerate(self.pdf): if not", "\"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] /", "nbin: number of specific bins for plotting :param float width: matplotlib bar width.", "x[center, center:] # * 1e-6 y = colcol[center, center:] plt.plot(x, y * -1,", "key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys. Use get_data_by_path')", "normalize ts = [] ncols = [] lower_nts = 0 max_ts = self.df['nts'].max()", "todo: think about this one. Does it belong here? Finish class. 
Integrate into", "simulation :ivar total_ncol: (int): total number of colloids in simulation :ivar pdf: (np.recarray)", "self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor *", "velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean colloid velocity \"\"\"", "than standard ModelPlot Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" data_paths", "= 1. self.continuous = 0 self.__data_startline = 0 self.__header = [] if filename.split('.')[-1]", "read the header from ascii output files for LB-Colloids Parameters: ---------- :param str", "self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] /", "Method to estimate residuals from jury 1991 equation using data Parameters vars: (np.array)", "*args: matplotlib args for 1d charts :param **kwargs: matplotlib keyword arguments for 1d", "reader.continuous # todo: replace this call with something from the header later! self.ncol", "Class to retrieve Colloid force arrays and plot for data analysis. Parameters: ----------", "dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]])", "to retrieve the pore volume calculation conversion for plotting colloids. 
\"\"\" pv_factor =", "== 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data", "calculation of breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None:", "= idx + 1 break else: pass def read_ascii(self, filename): \"\"\" Method to", "'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity':", "v, pdf): \"\"\" Method to estimate residuals from jury 1991 equation using data", "key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()]", "\"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be", "t, v): \"\"\" Equation for Jury 1991 calculation of Dispersivity and Retardation Parameters", "from endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint file name :param int", "plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts']", "to be re-named and updated to CDE equation Parameters: ---------- :param str filename:", "data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key ==", ":param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous:", "Parameters: ---------- :param int nbin: number of timesteps to bin a pdf for", "\"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not", "must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen", "* D * t x = (eq0 / eq1) * np.exp(eq2 / eq3)", "def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method to", "@property def stdev(self): \"\"\" :return: standard deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity'])", "(float) column length v: (float) mean fluid velocity t: (float) time bt: (np.array)", "key, instead of data path Parameters: ---------- :param str key: lattice boltzmann data", "hdf5 data by specific hdf5 path Parameters: ---------- :param str path: hdf5 directory", "str key: valid data key :param *args: matplotlib plotting args :param **kwargs: matplotlib", "KeyError(\"{} is not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center", "ncol = 0 ncol_per_release = [] for index, row in bt_colloids.iterrows(): lower_ts =", "colloid velocity for a simulation. 
Class needs to be rebuilt to work with", "numpy as np import matplotlib.pyplot as plt import pandas as pd import h5py", "of time steps to base bin on :param bool normalize: method to calculate", "3) eq2 = -(R * L - v * t) ** 2 eq3", "method to return valid keys to obtain data \"\"\" return CCModelPlot.keys def get_data(self,", "scipy.optimize import least_squares l = self.ylen * self.resolution v = self.uy t =", "= x[center, center:] # * 1e-6 y = colcol[center, center:] plt.plot(x, y *", "row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts':", "True else: pass else: seq = False strip_idx = None if strip_idx is", "lower_v = self.min - adjuster upper_v = 0 for upper_v in bins: ncol", "\"\"\" Convience method to plot data into a matplotlib chart. Parameters: ---------- :param", "/\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method", "= self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v,", "@property def cv(self): \"\"\" :return: coeficient of variance of colloid velocities \"\"\" return", "plot accepts matplotlib args and kwargs!!! 
>>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import", "lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts)", "pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method to plot data into a", "ModelPlot and CCModelPlot are useful for visualizing colloid-surface forces and colloid-colloid forces respectively.", "bin a pdf for calculation \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'):", "from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt >>> >>> hdf =", "of bins for pdf calculation Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of", "self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep))", "calculate pdf by residence time or end time \"\"\" self.bin = nbin self.__normalize", "class ADE(object): \"\"\" Class to calculate macroscopic advection dispersion equation parameters for field", "100 def plot(self, *args, **kwargs): \"\"\" Method to plot distribution of velocities by", "def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method to estimate residuals from", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins =", "to nearby value to prevent log scale crashing shape = mesh.shape center =", "if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(),", "/ self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs)", "keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select data from", "in HDF5 stored outputs from colloid models. 
Contains a data_paths dictionary which allows", "= self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class", "ax: matplotlib pyplot axes object (optional) :param *args: matplotlib plotting args :param **kwargs:", "time \"\"\" D = vars[0] R = vars[1] eq0 = (A * L", "---------- :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'],", "None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if", "x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and plot", "\"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array", "a pdf for calculation \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise", "y = colcol[center, center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x =", "= line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip())", "\"\"\" Method to plot distribution of velocities by colloid for array of velocity.", "initial guess. Cannot be 0 :param float R: Retardation initial guess. 
Cannot be", "1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax = mesh.max() if 'vmin'", "if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.df", "pdf data with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args", "1d plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion()", "None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y',", "= ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0]", "def var(self): \"\"\" :return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def", "plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf data with pore volumes (non-dimensional", "\"\"\" data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None,", "matplotlib args for 1d plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\"", "self.velocity_factor = 1. self.continuous = 0 self.__data_startline = 0 self.__header = [] if", "(int): interval of continuous release, 0 means pulse :ivar ncol: (float): number of", "center:] y = colcol.T[center, center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x", "colloid velocities \"\"\" return (self.stdev / self.mean) * 100 def plot(self, *args, **kwargs):", "method to calculate the mean velocity of each colloid in the simulation \"\"\"", "todo: replace this call with something from the header later! 
self.ncol = reader.ncol", ":return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key =", "pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position':", "x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v,", "for quiver plotting :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs", "be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann data", "float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object to read in", "to read LB Colloids simulation outputs and perform post processing. Many classes are", "ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the", "\"\"\" Method to create a quiver plot to display the magnitude and direction", "matplotlib chart. 
Parameters: ---------- :param bool time: if true x-axis is time, false", "self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy", "colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of colloid", "hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property", "'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity':", "+\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key ==", "\"\"\" Method to plot breakthrough data with pore volumes (non-dimensional time) Parameters: ----------", "self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep,", "(np.recarray) colloid probability distribution function \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'):", "max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag']", "# todo: create a function_fmt for axis options mesh = None if ax", "1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve =", "if normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol += 1 else: if", "for 1d plotting :param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor =", "self.pore_volume_conversion() if self.continuous: 
plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs)", "calculate macroscopic advection dispersion equation parameters for field scale model parameterization Class needs", "plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] *", ">>> plt.show() \"\"\" import numpy as np import matplotlib.pyplot as plt import pandas", "---------- :param *args: matplotlib args for 1d plotting :param **kwargs: matplotlib kwargs for", "Colloids simulation outputs and perform post processing. Many classes are available to provide", "Parameters: ---------- :param str path: hdf5 directory path to data Returns: ------- :return:", "or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float,", "'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine',", "yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2.", "str filename: ascii output file name from colloid model :param int nbin: number", "kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor", "> 10: vmin = mesh.min() vmax = mesh.max() if 'vmin' in kwargs: vmin", "line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx + 1 break else:", "'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y':", "self.resolution = 
float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'):", "D * t ** 3) eq2 = -(R * L - v *", "flag to calculate pdf by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize)", "'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y':", "np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin = mesh.min() vmax =", "to select data from hdf5 file based on key, instead of data path", "mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args and kwargs!!! >>>", "x[0] = 0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\"", "x: (float) column length v: (float) mean fluid velocity t: (float) time \"\"\"", "return data class ASCIIReader(object): \"\"\" Class to read in text based output files", "to solve least squares for van genuchten 1986. Miscable displacement. Parameters: ---------- :param", "self.ylen = 0 self.ux = 0 self.uy = 0 self.velocity_factor = 1. self.continuous", "filename): self.timestep = 0 self.ncol = 0 self.resolution = 0 self.xlen = 0", "vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v: (float) mean fluid_velocity", "x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev,", "self.bin = nbin self.__normalize = normalize ts = [] ncols = [] lower_nts", "def __get_velocity_array(self): \"\"\" Built in method to calculate the mean velocity of each", "ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key", "representation of colloid-colloid dlvo profiles. 
Parameters: ---------- :param str key: valid data key", "line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): self.__data_startline = idx +", "**kwargs) else: arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img", "as f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t = line.split() self.timestep", "'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif", "model :param int nbin: number of timesteps to bin a pdf for calculation", "*args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self,", "< row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr", "(np.array) times \"\"\" strip_idx = None seq = False bt = False for", "ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create a", "valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center = shape[0] // 2", "str key: valid dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\"", "------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to", "elif idx == self.__data_startline: self.__header = [i.rstrip() for i in line.split() if i", "center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] #", "colloid-colloid dlvo profiles Parameters: ---------- :param str key: valid data key :param *args:", "self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx,", "= pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, 
int(max_ts) + 1, nbin): ncol = 0", "nts, 'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\"", "else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1])", "= np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key ==", "velocity[idx]]) self.velocity = arr @property def max(self): \"\"\" :return: maximum colloid velocity \"\"\"", "& (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts']", "LB-Colloid hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray',", "'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y':", "read_ascii(self, filename): \"\"\" Method to read endpoint file data from from ascii files", "pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts)", "\"\"\" Class to prepare and plot breakthrough curve data from endpoint files. 
Parameters:", "self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values /", "filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep", "plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to", "@property def breakthrough_curve(self): \"\"\" Property method that performs a dynamic calculation of breakthrough", "\"\"\" from scipy.optimize import least_squares l = self.ylen * self.resolution v = self.uy", "= np.sqrt(4 * D * R * t) x = 0.5 * special.erfc(eq0/eq1)", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if key not", "nbin) ncols = [] velocity = [] lower_v = self.min - adjuster upper_v", "hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()]", "0 self.uy = 0 self.velocity_factor = 1. self.continuous = 0 self.__data_startline = 0", "mean fluid_velocity t: (float) time pdf: pd.dataframe c/co of colloid pdf \"\"\" return", "Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths = {'ac':", "files. 
Parameters: ---------- :param str filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas", "self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method", "= self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise", "the pdf bin values Parameters: ---------- :param int nbin: number of timesteps to", "rec in enumerate(self.pdf): if not bt: if rec['ncol'] != 0: bt = True", "to view and analyze colloid force arrays Parameters: ---------- :param str key: valid", "= float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t", "supplied bin size. Parameters: ---------- :param int nbin: number of time steps to", "method for 1d colloid-colloid dlvo profiles Parameters: ---------- :param str key: valid data", "'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y',", "= self.uy pdf, t = self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0,", "x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in", "::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure')", "simulation :ivar total_ncol: (int): total number of colloids in simulation \"\"\" def __init__(self,", "if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep", "a valid key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y'))", "time=True, *args, 
**kwargs): \"\"\" Method to plot data into a matplotlib chart. Parameters:", "function based upon user supplied bin size. Parameters: ---------- :param int nbin: number", "data by specific path Parameters: ---------- :param str path: hdf5 directory path to", "if ax is None: ax = plt.gca() if key not in ('col_col', 'col_col_fine',", "np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self,", "to generate a probability distribution function based upon user supplied bin size. Parameters:", "else: pass else: if rec['ncol'] == 0: if not seq: strip_idx = idx", "to return colloid velocity and statistics relating to colloid velocity for a simulation.", "= reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen self.df =", "conversion for plotting colloids. \"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen *", "\"\"\" Method to return data by key Parameters: ---------- :param str key: valid", "\"\"\" Class to calculate macroscopic advection dispersion equation parameters for field scale model", "1991 equation using data Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float)", "ax = plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x',", "be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return self.__hdf.keys def get_data(self, key):", "np.int, 'end-ts': np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep = 0", "standard ModelPlot Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" data_paths =", "# hdf[Hdf5Reader.data_paths['lvdw_x']][()] data = data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\", "method for 2d representation of colloid-colloid dlvo profiles. 
Parameters: ---------- :param str key:", "from self.keys Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path):", "float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader =", "normalize=False): \"\"\" Method to generate a probability distribution function based upon user supplied", "// 2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:]", "pdf by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf", "self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6 y = colcol[center, center:] else:", "pd import h5py as H class Breakthrough(object): \"\"\" Class to prepare and plot", "to data Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key,", "hdf5 file name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'):", "reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen", "for v in self.velocity['velocity']: if lower_v < v <= upper_v: ncol += 1", "lower_v < v <= upper_v: ncol += 1 velocity.append((lower_v + upper_v)/2.) ncols.append(ncol) lower_v", "(ie. endpoint, timestep, or pathline) \"\"\" with open(filename) as f: t = []", "use keys to access data Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file", "and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.file_name =", "\"\"\" Class to query colloid-colloid interactions and plot data as 1d or as", "calculation :param bool normalize: flag to calculate pdf by residence time or end", "filename: <>.endpoint file name :param int nbin: number of bins for pdf calculation", "number of specific bins for plotting :param float width: matplotlib bar width. 
:param", "*args, **kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method for 2d", "adjuster upper_v = 0 for upper_v in bins: ncol = 0 for v", "*args, **kwargs): \"\"\" Method to plot breakthrough data with pore volumes (non-dimensional time)", "key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array',", "= -(R * L - v * t) ** 2 eq3 = 4", "A: ncol l: (float) ylen v: (float) mean fluid_velocity t: (float) time pdf:", "= hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif", "# hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] +", "LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties Parameters: ---------- :param str hdf:", "Parameters: ---------- :param *args: matplotlib args for 1d plotting :param **kwargs: matplotlib kwargs", "fluid velocity t: (float) time \"\"\" from scipy import special D = vars[0]", "nearby value to prevent log scale crashing shape = mesh.shape center = shape[0]", "'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'):", "\"\"\" Method to plot data into a matplotlib chart. 
Parameters: ---------- :param bool", "for upper_v in bins: ncol = 0 for v in self.velocity['velocity']: if lower_v", "if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data =", "eq1 = np.sqrt(4 * D * R * t) x = 0.5 *", "\"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys.", "A, L, t, v) def __jury_1991(self, vars, A, L, t, v): \"\"\" Equation", "to access data Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file name \"\"\"", "timestep, pathline> to a pandas dataframe Parameters: ---------- :param str filename: output filename", "(eq0 / eq1) * np.exp(eq2 / eq3) x[0] = 0 return x def", "= np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev,", "= {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None,", "colloid probability distribution function \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise", "steps to base bin on :param bool normalize: method to calculate pdf by", "standard deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return:", "'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y',", "= self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t, v,", "ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)]))", "0 for index, row in pdf_colloids.iterrows(): if normalize: if 
lower_nts < row['delta-ts'] <=", "hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y':", "data. Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float) mean", "to read the header from ascii output files for LB-Colloids Parameters: ---------- :param", "ccy else: mesh = self.__hdf5.get_data(key) # find center and set to nearby value", "---------- :param int nbin: number of time steps to base bin on :param", "charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args,", "'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy',", "vmin = mesh.min() vmax = mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin')", "enumerate(f): if idx < self.__data_startline: pass elif idx == self.__data_startline: self.__header = [i.rstrip()", "self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot", "in valid keys. Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x':", "class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and plot data as 1d", "colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A, L, t, v) def __jury_1991(self,", "forces respectively. 
example import of the Colloid_output.py module is as follows >>> from", "None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x',", "Cannot be 0 :param float ftol: scipy function tolerance for solution :param int", "simulation :ivar pdf: (np.recarray) colloid probability distribution function \"\"\" def __init__(self, filename, nbin=1000):", "dictionary. Answer in dict['x'] \"\"\" # todo: test this method! look up references", "self.continuous = reader.continuous # todo: replace this call with something from the header", "volume calculation conversion for plotting colloids. \"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\", "v: (float) mean fluid_velocity t: (float) time pdf: pd.dataframe c/co of colloid pdf", "self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if", "key: valid dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" return", "bt: if rec['ncol'] != 0: bt = True else: pass else: if rec['ncol']", "self.xlen = reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time", "normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy", "ncols = [] velocity = [] lower_v = self.min - adjuster upper_v =", "curve data from endpoint files. 
Parameters: ---------- :param str filename: <>.endpoint file Attributes:", "not in ('\\t', '', ' ', '\\n')]) temp = np.array(t).T temp = {self.__header[idx]:", "= False self.__reader = reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate", "self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to calculate macroscopic advection", "class ASCIIReader(object): \"\"\" Class to read in text based output files <endpoint, timestep,", "the magnitude and direction of velocity vectors within the system. Parameters: ---------- :param", "= self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img == 1,", "* self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values /", "bt = True else: pass else: if rec['ncol'] == 0: if not seq:", "', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i not in ('\\t',", "self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols", "t = self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a, l, t,", "t): \"\"\" Equation for <NAME> and Winerega 1986 to calculate Dispersivity and Retardation", "df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod def __try_float(val): try: return float(val)", "def get_data(self, key): \"\"\" Method to select data from hdf5 file based on", "Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v: (float) mean", "= self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0])", "/ (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) 
colloid.append(index)", "<>.endpoint file name :param int nbin: number of bins for pdf calculation Attributes:", "\"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args and", "valid dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" if key", "else: seq = False strip_idx = None if strip_idx is not None: pdf", "= colcol[center, center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center,", "x.T[center, center:] y = colcol.T[center, center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x')", "as follows >>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt >>>", "+ 1 break else: pass def read_ascii(self, filename): \"\"\" Method to read endpoint", "Method to select data from hdf5 file based on key, instead of data", "'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y':", "\"\"\" Method to retrieve hdf5 data by specific hdf5 path Parameters: ---------- :param", "---------- :param str key: lattice boltzmann data key Returns: ------- :return: data \"\"\"", "pd.dataframe c/co of colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A, L, t,", "= float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property", "self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep,", "= False strip_idx = None if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx", "float width: matplotlib bar width. :param *args: matplotlib plotting args :param **kwargs: matplotlib", "to calculate Dispersivity and Retardation from breakthrough data. 
Parameters: vars: (np.array) [dispersivity, retardation]", "return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of colloid velocities \"\"\" return", "'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf')", "query colloid-colloid interactions and plot data as 1d or as a meshgrid object", "= mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs:", "'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x',", "pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class to", "= df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume", "to plot a probablity distribution function of colloid breakthrough from endpoint files. Parameters:", "pandas as pd import h5py as H class Breakthrough(object): \"\"\" Class to prepare", "t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D,", "and direction of velocity vectors within the system. Parameters: ---------- :param int nbin:", "x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked: img", "class Hdf5Reader(object): \"\"\" Reader object to read in HDF5 stored outputs from colloid", "yy = np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img == 1, a=x) y", "::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args, **kwargs) qk = plt.quiverkey(Q, 0.9, 0.9,", "ValueError: return float('nan') class Hdf5Reader(object): \"\"\" Reader object to read in HDF5 stored", "float D: Diffusivity initial guess. 
Cannot be 0 :param float R: Retardation initial", "def __van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation for <NAME> and Winerega 1986", "__jury_1991(self, vars, A, L, t, v): \"\"\" Equation for Jury 1991 calculation of", "v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt):", "v, t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation for <NAME> and", "self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts']", "None if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx + 1] time =", "keys(self): \"\"\" Property method to return valid keys to obtain data \"\"\" return", "in ('\\t', '', ' ', '\\n')]) temp = np.array(t).T temp = {self.__header[idx]: data", "data method to view and analyze colloid force arrays Parameters: ---------- :param str", "<endpoint, timestep, pathline> to a pandas dataframe Parameters: ---------- :param str filename: output", "by hdf5 path Parameters: ---------- :param str path: valid HDF5 data path \"\"\"", "velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts']", "by residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def", "not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = []", "nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df", "\"\"\" Scipy optimize method to solve least squares for van genuchten 1986. 
Miscable", ":return: standard deviation of colloid velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\"", "kwargs \"\"\" if key not in ('col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is", "ncol = 0 for index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts'])", "arrays Parameters: ---------- :param str key: valid dictionary key from self.keys Returns: -------", "function tolerance for solution :param int max_nfev: maximum number of function iterations :param", "self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key)", "instead of data path Parameters: ---------- :param str key: lattice boltzmann data key", "x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key ==", "'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0]", "1d colloid-colloid dlvo profiles Parameters: ---------- :param str key: valid data key :param", "charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args,", "volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args for 1d plotting :param", "self.timestep = 0 self.ncol = 0 self.resolution = 0 self.xlen = 0 self.ylen", "= 4 * R * D * t x = (eq0 / eq1)", "'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x',", "for LB-Colloids Sets data to pandas dataframe Parameters: ---------- :param str filename: colloid", "(self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" 
Convience method", "tuple([value, velocity[idx]]) self.velocity = arr @property def max(self): \"\"\" :return: maximum colloid velocity", "def get_data(self, key): \"\"\" Method to return data by key Parameters: ---------- :param", "'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids =", "mesh.max() if 'vmin' in kwargs: vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx", "analysis. Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" def __init__(self, hdf5):", "means pulse :ivar ncol: (float): number of colloids per release in simulation :ivar", ":return: data <varies> \"\"\" if key not in Hdf5Reader.data_paths: raise KeyError('Dictionary key not", "*args, **kwargs) else: arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr =", "row in pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol +=", "in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts']", "output files for LB-Colloids Parameters: ---------- :param str filename: colloid model output filename", "data with pore volumes (non-dimensional time) Parameters: ---------- :param *args: matplotlib args for", "\"\"\" Built in method to calculate the mean velocity of each colloid in", "'', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in line.split() if i not", "data :ivar resolution: (float): model resolution :ivar timestep: (float): model timestep :ivar continuous:", "nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids", "(float) column length v: (float) mean fluid velocity t: (float) time \"\"\" from", "keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get data method to view and", "plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 
'dlvo_x', 'dlvo_y', 'attractive_x',", "'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None,", "\"\"\" :return: coeficient of variance of colloid velocities \"\"\" return (self.stdev / self.mean)", "p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx,", "for i in line.split() if i not in ('\\t', '', ' ', '\\n')]", "if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values", "reader.timestep self.continuous = reader.continuous # todo: replace this call with something from the", "\"\"\" Get data method to view and analyze colloid force arrays Parameters: ----------", "chart. Parameters: ---------- :param int nbin: number of specific bins for plotting :param", "a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width', *args,", "= np.array(t).T temp = {self.__header[idx]: data for idx, data in enumerate(temp)} df =", "1d or as a meshgrid object More sophisticated than standard ModelPlot Parameters: ----------", ":param str key: valid data key :param *args: matplotlib plotting args :param **kwargs:", "in pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol += 1", "of timesteps to bin a pdf for calculation :param bool normalize: flag to", "Pulse flux. Parameters: ---------- :param float D: Diffusivity initial guess. 
Cannot be 0", "plotting :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if", "for 1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] /", "plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol,", "0 :param float R: Retardation initial guess. Cannot be 0 :param float ftol:", "reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array()", "axis options mesh = None if ax is None: ax = plt.gca() if", "re-named and updated to CDE equation Parameters: ---------- :param str filename: ascii output", "def mean(self): \"\"\" :return: mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self):", "np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int,", "time pdf: pd.dataframe c/co of colloid pdf \"\"\" return pdf - self.__jury_1991(vars, A,", "D: Diffusivity initial guess. Cannot be 0 :param float R: Retardation initial guess.", "\"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else:", "nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol': ncols}).set_index('ncol') self.__breakthrough_curve = df else:", "for jury 1991. Pulse flux. 
Parameters: ---------- :param float D: Diffusivity initial guess.", "key, *args, **kwargs): \"\"\" Plotting method for 1d colloid-colloid dlvo profiles Parameters: ----------", "'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy',", ":ivar ncol: (float): number of colloids per release in simulation :ivar total_ncol: (int):", "* self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Convience method to", "of each colloid in the simulation \"\"\" colloid = [] velocity = []", "follows >>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt >>> >>>", "* t) x = 0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return", "Hdf5Reader keys Parameters: ---------- :param str key: valid dictionary key from self.keys :param", "'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array':", "line in enumerate(f): if idx < self.__data_startline: pass elif idx == self.__data_startline: self.__header", "Returns: ------ :return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()]", "from the header later! 
self.ncol = reader.ncol self.total_ncol = float(self.df.shape[0]) self.__breakthrough_curve = None", "in simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must", "self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class", "= [] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3]", "1 else: if lower_nts < row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol)", "\"\"\" The Colloid_output module contains classes to read LB Colloids simulation outputs and", "1d charts :param **kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time:", "i not in ('\\t', '', ' ', '\\n')] else: t.append([self.__try_float(i.rstrip()) for i in", "* t eq1 = np.sqrt(4 * D * R * t) x =", "raise FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution", "in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df", "@staticmethod def __try_float(val): try: return float(val) except ValueError: return float('nan') class Hdf5Reader(object): \"\"\"", "index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts'] t", "plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to plot a histogram of", "- v * t eq1 = np.sqrt(4 * D * R * t)", "vars, A, L, t, v): \"\"\" Equation for Jury 1991 calculation of Dispersivity", "model resolution :ivar timestep: (float): model timestep :ivar continuous: (int): interval of continuous", "self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img == 1, a=yy)", "nbin self.__normalize = normalize ts = [] ncols = [] lower_nts = 0", ":ivar continuous: (int): interval of continuous 
release, 0 means pulse :ivar ncol: (float):", "= self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs)", "raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if dimensional: x =", "Returns: ------- :return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"): factor = self.__hdf5.get_data(\"conversion_factor\")", "\"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close() return data class FileTypeError(Exception):", "* 100 def plot(self, *args, **kwargs): \"\"\" Method to plot distribution of velocities", "on key, instead of data path Parameters: ---------- :param str key: lattice boltzmann", "to read in HDF5 stored outputs from colloid models. Contains a data_paths dictionary", "**kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize:", "\"\"\" return pdf - self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars, A,", "'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else:", "* pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object): \"\"\" Class", ":param int nbin: number of timesteps to bin a pdf for calculation \"\"\"", "self.ncol = 0 self.resolution = 0 self.xlen = 0 self.ylen = 0 self.ux", "self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs): \"\"\" Method to plot data", "= np.recarray((len(ts),), dtype=[('nts', np.float), ('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx] =", "dispersion equation parameters for field scale model parameterization Class needs to be re-named", "minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" 
:return: mean colloid", "**kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not valid for", "= reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol =", "= upper_v - adjuster velocity.append(upper_v + adjuster) ncols.append(0) plt.bar(velocity, ncols, width, *args, **kwargs)", "self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property", "bin on :param bool normalize: method to calculate pdf by residence time or", "None seq = False bt = False for idx, rec in enumerate(self.pdf): if", "== 1, a=yy) x = np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img ==", "not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a", "in enumerate(self.pdf): if not bt: if rec['ncol'] != 0: bt = True else:", "number of timesteps to bin a pdf for calculation :param bool normalize: flag", "[] nts = [] ncol = 0 ncol_per_release = [] for index, row", "\"\"\" Method to return colloid velocity and statistics relating to colloid velocity for", "'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'}", "eq3 = 4 * R * D * t x = (eq0 /", "Method to return data by hdf5 path Parameters: ---------- :param str path: valid", "x def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to", "plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method for 2d representation of colloid-colloid", "'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts':", "keys. 
Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x': data =", ":return: data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close() return", "x = x.T[center, center:] y = colcol.T[center, center:] elif key == \"col_col_fine_x\": x", "row['end-ts'] - self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts']", "be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen =", "0 self.ux = 0 self.uy = 0 self.velocity_factor = 1. self.continuous = 0", "axis=1) df = df.set_index('colloid') return df @staticmethod def __try_float(val): try: return float(val) except", "upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol", "= data[0] elif key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()]", "__van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation for <NAME> and Winerega 1986 to", "width. 
:param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster", "max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method", ":param *args: matplotlib args for 1d charts :param **kwargs: matplotlib keyword arguments for", "= self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R])", "variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard", "'distance_fine': 'colloids/distance_fine'} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf", "eq2 = -(R * L - v * t) ** 2 eq3 =", "ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least squares for van", "self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args,", "a matplotlib chart. 
Parameters: ---------- :param bool time: if true x-axis is time,", "= np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1,", "not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep =", "if masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr", "plot to display the magnitude and direction of velocity vectors within the system.", "pathline) \"\"\" with open(filename) as f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"):", "data = data[0] elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()]", "if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else:", "plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol']", "matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args,", "/ float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to", "in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property def max(self): \"\"\"", "np.float), ('ncol', np.float)]) for idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf", "plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot a", "self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol,", "== \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6 y", "ylen v: (float) mean 
fluid_velocity t: (float) time \"\"\" D = vars[0] R", "method to solve least sqares for jury 1991. Pulse flux. Parameters: ---------- :param", "hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.file_name", "str filename: <>.endpoint file name :param int nbin: number of bins for pdf", "0 self.__header = [] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}:", "in enumerate(f): if idx < self.__data_startline: pass elif idx == self.__data_startline: self.__header =", "ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr", "class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties Parameters: ---------- :param str", ":ivar total_ncol: (int): total number of colloids in simulation :ivar pdf: (np.recarray) colloid", "---------- :param float D: Diffusivity initial guess. Cannot be 0 :param float R:", "key: lattice boltzmann data key Returns: ------- :return: data \"\"\" if key in", "= ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol =", "max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least squares for van genuchten", "for axis options mesh = None if ax is None: ax = plt.gca()", "of function iterations :param **kwargs: scipy least squares kwargs Returns: ------- :return: scipy", "hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property", "= 0 self.ux = 0 self.uy = 0 self.velocity_factor = 1. 
self.continuous =", "timestep :ivar continuous: (int): interval of continuous release, 0 means pulse :ivar ncol:", "'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def", "/ float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1])", "t = [] for idx, line in enumerate(f): if idx < self.__data_startline: pass", "self.keys Returns: ------- :return: data <varies> \"\"\" if key not in Hdf5Reader.data_paths: raise", "+ 1, nbin): ncol = 0 for index, row in pdf_colloids.iterrows(): if normalize:", "eq0 = R * l - v * t eq1 = np.sqrt(4 *", "raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def", "= [] if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in", "needs to be rebuilt to work with timeseries and pathline files for a", "1 break else: pass def read_ascii(self, filename): \"\"\" Method to read endpoint file", "def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader", "from colloid model :param int nbin: number of timesteps to bin a pdf", "np.linspace(self.min - adjuster, self.max, nbin) ncols = [] velocity = [] lower_v =", "LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select data from hdf5 file based", "KeyError(\"{} is not a valid key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x'))", "**kwargs): \"\"\" Plotting method for 2d representation of colloid-colloid dlvo profiles. 
Parameters: ----------", "boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def get_data(self, key): \"\"\" Method to select", "args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min", ">>> >>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args,", "example import of the Colloid_output.py module is as follows >>> from lb_colloids import", ":param **kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] *", "key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return", "colloid models. Contains a data_paths dictionary which allows the user to use keys", "line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif", "model timestep :ivar continuous: (int): interval of continuous release, 0 means pulse :ivar", "scipy least squares kwargs Returns: ------- :return: scipy least squares dictionary. 
Answer in", "raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution", "ncol_per_release = [] for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous", "* self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def plot(self, time=True, *args, **kwargs):", "= hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self, path): \"\"\"", "self.resolution v = self.uy t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() /", "self.__hdf5.get_data(key) # find center and set to nearby value to prevent log scale", "arr = self.__hdf.get_data(key) if masked: arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args,", "\"\"\" from scipy import special D = vars[0] R = vars[1] eq0 =", "* self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals,", "resolution :ivar timestep: (float): model timestep :ivar continuous: (int): interval of continuous release,", "think about this one. Does it belong here? Finish class. Integrate into LB", "<= upper_nts: ncol += 1 else: if lower_nts < row['end-ts'] <= upper_nts: ncol", "on changing the pdf bin values Parameters: ---------- :param int nbin: number of", "colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean colloid velocity", "total_ncol: (int): total number of colloids in simulation \"\"\" def __init__(self, filename): if", "bar width. 
:param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\"", "raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution", ":param object ax: matplotlib pyplot axes object (optional) :param *args: matplotlib plotting args", "temp = np.array(t).T temp = {self.__header[idx]: data for idx, data in enumerate(temp)} df", "---------- :param str filename: endpoint file name \"\"\" def __init__(self, filename): if not", "into LB class LBOutput(object): \"\"\" Class to anaylze LB fluid/solid properties Parameters: ----------", "strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx +", "data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x',", "plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\"", "= float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux = float(t[-1].rstrip()) elif line.startswith('uy'): t", "\"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen * self.resolution) return pv_factor def", "= Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\" return", "key: valid data key :param object ax: matplotlib axes object (optional) :param *args:", "not a valid key\".format(key)) colcol = self.__hdf5.get_data(key) shape = colcol.shape center = shape[0]", "args and kwargs!!! 
>>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as np", "return least_squares(self.__van_genuchten_residuals, x0, args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars,", "'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key", "return float('nan') class Hdf5Reader(object): \"\"\" Reader object to read in HDF5 stored outputs", "(np.array) [dispersivity, retardation] x: (float) column length v: (float) mean fluid velocity t:", "'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None,", "Class needs to be re-named and updated to CDE equation Parameters: ---------- :param", "to anaylze LB fluid/solid properties Parameters: ---------- :param str hdf: hdf5 output filename", "ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts - self.continuous)", "if idx < self.__data_startline: pass elif idx == self.__data_startline: self.__header = [i.rstrip() for", "breakthrough curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if not", "else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to", "for index, row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts']", "key): \"\"\" Get data method to view and analyze colloid force arrays Parameters:", "@property def keys(self): \"\"\" Property method to return valid keys to obtain data", "pdf, time class ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays and plot", 
"self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity',", "yy = np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img ==", "self.__breakthrough_curve is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids =", "not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5)", "self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)]", "Method to create a quiver plot to display the magnitude and direction of", "t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol =", "log scale crashing shape = mesh.shape center = shape[0] // 2 mesh[center, center]", "img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args,", "y = colcol.T[center, center:] elif key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x =", "self.ylen = reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) *", "return self.__hdf.keys def get_data(self, key): \"\"\" Get data method to view and analyze", "raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self,", "Parameters: ---------- :param int nbin: number of time steps to base bin on", "release in simulation :ivar total_ncol: (int): total number of colloids in simulation :ivar", "1, nbin): ncol = 0 for index, row in pdf_colloids.iterrows(): if normalize: if", "end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10,", "provide plotting functionality. 
ModelPlot and CCModelPlot are useful for visualizing colloid-surface forces and", "is nts :param *args: matplotlib args for 1d charts :param **kwargs: matplotlib keyword", "in Hdf5Reader.data_paths: raise KeyError('Dictionary key not in valid keys. Use get_data_by_path') hdf =", "True else: pass else: if rec['ncol'] == 0: if not seq: strip_idx =", "'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y', 'velocity_x': 'colloids/ux', 'velocity_y': 'colloids/uy', 'gravity': 'colloids/gravity', 'bouyancy':", "str hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x':", "kwargs!!! >>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy as np import matplotlib.pyplot", "= self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor',", "+ 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min()", "FileTypeError('<>.endpoint file must be supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution =", "return valid keys to obtain data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\"", "bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 ncol_per_release = []", "Parameters: ---------- :param int nbin: number of specific bins for plotting :param float", "displacement. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be 0 :param", "= 0 self.ncol = 0 self.resolution = 0 self.xlen = 0 self.ylen =", "Returns: pdf = (np.array) stripped pdf t = (np.array) times \"\"\" strip_idx =", "= True else: pass else: if rec['ncol'] == 0: if not seq: strip_idx", "def __prep_data(self): \"\"\" Prepares breakthrough data by stripping off trailing zeros. 
Returns: pdf", "v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t, v, pdf):", "data \"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method to return data by", "row in bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts'] t =", "args :param **kwargs: matplotlib plotting kwargs \"\"\" # todo: create a function_fmt for", "self.continuous = 0 self.__data_startline = 0 self.__header = [] if filename.split('.')[-1] not in", "Cannot be 0 :param float R: Retardation initial guess. Cannot be 0 :param", "self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol", "ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else: mesh", "\"\"\" adjuster = 0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin) ncols =", "ncol l: (float) ylen v: (float) mean fluid_velocity t: (float) time \"\"\" D", "= df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols =", "name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x':", "in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts,", "pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L, t, v, pdf): \"\"\"", "data <varies> \"\"\" hdf = H.File(self.file_name, 'r') data = hdf[path][()] hdf.close() return data", "Scipy optimize method to solve least sqares for jury 1991. Pulse flux. 
Parameters:", "retardation] A: ncol l: (float) ylen v: (float) mean fluid_velocity t: (float) time", "ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen", "= df.set_index('colloid') return df @staticmethod def __try_float(val): try: return float(val) except ValueError: return", "pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts'] *", "'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine':", "-1, *args, **kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\" Plotting method for", "np.int, 'delta-ts': np.int, 'continuous': np.int} def __init__(self, filename): self.timestep = 0 self.ncol =", "self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to return valid keys", "\"\"\" Equation for <NAME> and Winerega 1986 to calculate Dispersivity and Retardation from", ":param int nbin: number of bins for pdf calculation Attributes: ---------- :ivar df:", "= 2 * np.sqrt(np.pi * D * t ** 3) eq2 = -(R", "get_data_by_path(self, path): \"\"\" Method to return data by hdf5 path Parameters: ---------- :param", "{'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y':", "self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset values based on", "center:] else: x = self.__hdf5.get_data('distance_fine_y') x = x[center, center:] # * 1e-6 y", "\"\"\" Method to estimate residuals from vanGenuchten and Winerega 1986 
Parameters: vars: (np.array)", "LogNorm if ax is None: ax = plt.gca() if key not in ('col_col',", "__init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader =", "= reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous # todo: replace this call", "ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys Parameters: ----------", "**kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method to estimate residuals", "key from self.keys Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self,", "in supported filetypes\".format(filename)) else: self.read_header(filename) self.df = self.read_ascii(filename) def read_header(self, filename): \"\"\" Method", "self.keys Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\"", "* self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep,", "\"\"\" Class to read in text based output files <endpoint, timestep, pathline> to", "keyword arguments for 1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep,", "center] = mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1]", "str hdf5: hdf5 file name \"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\", "---------- :param int nbin: number of timesteps to bin a pdf for calculation", ":param bool time: if true x-axis is time, false is nts :param *args:", "max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least sqares for jury 1991.", "variance of colloid velocities \"\"\" return (self.stdev / self.mean) * 100 def plot(self,", "np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked: img = 
self.__hdf.get_data('image') xx", "key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not", "def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific path Parameters:", "= line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip())", "length v: (float) mean fluid velocity t: (float) time \"\"\" from scipy import", "or hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self): return", "enumerate(temp)} df = pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df", "estimate residuals from vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x:", "lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts)", "supplied') reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen", "t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution =", "self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t = line.split() self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10):", "least squares for van genuchten 1986. Miscable displacement. 
Parameters: ---------- :param float D:", "def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5", "key): \"\"\" Method to select data from hdf5 file based on key, instead", "velocities \"\"\" return np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of variance of", "\"\"\" with open(filename) as f: for idx, line in enumerate(f): if line.startswith(\"Timestep\"): t", "self.resolution = reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen self.df = reader.df self.ncol", "module is as follows >>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as", "maximum number of function iterations :param **kwargs: scipy least squares kwargs Returns: -------", "self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] *", "data_paths dictionary which allows the user to use keys to access data Parameters:", "hdf5 file must be supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" :return:", "self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method", "reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen = reader.ylen self.df = reader.df", "'\\n')]) temp = np.array(t).T temp = {self.__header[idx]: data for idx, data in enumerate(temp)}", "'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if not", "vars[0] R = vars[1] eq0 = R * l - v * t", "hdf5 file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x':", "sophisticated than standard ModelPlot Parameters: ---------- :param str hdf5: hdf5 file name \"\"\"", "ncol += 1 velocity.append((lower_v + upper_v)/2.) 
ncols.append(ncol) lower_v = upper_v - adjuster velocity.append(upper_v", "str key: lattice boltzmann data key Returns: ------- :return: data \"\"\" if key", ">>> # model plot accepts matplotlib args and kwargs!!! >>> mp.plot('edl_x', cmap='viridis') >>>", "def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a probability distribution function based", "t: (float) time bt: (np.array) breakthrough curve \"\"\" return bt - self.__van_genuchten_1986(vars, l,", "= (np.array) stripped pdf t = (np.array) times \"\"\" strip_idx = None seq", "+ ccy else: mesh = self.__hdf5.get_data(key) # find center and set to nearby", "float(t[-1].rstrip()) elif line.startswith('xlen'): t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t =", "t = line.split() self.xlen = float(t[-1].rstrip()) elif line.startswith('ylen'): t = line.split() self.ylen =", "data Parameters vars: (np.array) [dispersivity, retardation] A: ncol l: (float) ylen v: (float)", "object More sophisticated than standard ModelPlot Parameters: ---------- :param str hdf5: hdf5 file", "breakthrough from endpoint files. Parameters: ---------- :param str filename: <>.endpoint file name :param", "---------- :param int nbin: number of specific bins for plotting :param float width:", "residence time or end time \"\"\" self.bin = nbin self.__normalize = normalize ts", "method that performs a dynamic calculation of breakthrough curve data \"\"\" max_ts =", "a function_fmt for axis options mesh = None if ax is None: ax", "* self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] *", "return CCModelPlot.keys def get_data(self, key): \"\"\" Method to return data by key Parameters:", "flux. Parameters: ---------- :param float D: Diffusivity initial guess. Cannot be 0 :param", "system. 
Parameters: ---------- :param int nbin: refinement for quiver plotting :param *args: matplotlib", "if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file must be", "nbin: number of timesteps to bin a pdf for calculation :param bool normalize:", "= ax.imshow(arr, *args, **kwargs) if mesh is not None: return mesh else: return", "= np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin],", "ccx + ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y'))", "0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key) if masked: img =", "if self.__breakthrough_curve is None: if not self.continuous: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids", "# todo: replace this call with something from the header later! self.ncol =", "for calculation \"\"\" def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file", "matplotlib plotting kwargs \"\"\" # todo: create a function_fmt for axis options mesh", "v) def __jury_1991(self, vars, A, L, t, v): \"\"\" Equation for Jury 1991", "bt_colloids = bt_colloids.sort_values('end-ts') ncols = [] nts = [] ncol = 0 for", "- self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <=", "to base bin on :param bool normalize: method to calculate pdf by residence", "not valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine')", "path: valid HDF5 data path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs):", "file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray',", "= None seq = False bt = False for 
idx, rec in enumerate(self.pdf):", "pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod def __try_float(val):", "args :param **kwargs: matplotlib plotting kwargs \"\"\" from matplotlib.colors import LogNorm if ax", "pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion for plotting colloids.", "self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize", "self.__van_genuchten_1986(vars, l, v, t) def __van_genuchten_1986(self, vars, l, v, t): \"\"\" Equation for", "x = self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center, center:] elif key", "self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'], self.breakthrough_curve.index.values /", "y = colcol[center, center:] plt.plot(x, y * -1, *args, **kwargs) def plot_mesh(self, key,", "'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) if key == 'col_col': ccx", "__init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 file", "return p class ColloidVelocity(object): \"\"\" Method to return colloid velocity and statistics relating", "pandas dataframe Parameters: ---------- :param str filename: output filename (ie. 
endpoint, timestep, or", "None self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property method that performs a", "('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array')", "= plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class", "df = df.set_index('colloid') return df @staticmethod def __try_float(val): try: return float(val) except ValueError:", "'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) if key ==", "= reader def reset_pdf(self, nbin, normalize=False): \"\"\" Method to generate a probability distribution", "self.min - adjuster upper_v = 0 for upper_v in bins: ncol = 0", ":param str path: hdf5 directory path to data Returns: ------- :return: data <varies>", "lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt >>> >>> hdf = \"mymodel.hdf5\"", "arr = np.ma.masked_where(arr == 0, a=arr) ax.imshow(arr, *args, **kwargs) else: arr = self.__hdf.get_data(key)", "import matplotlib.pyplot as plt >>> >>> hdf = \"mymodel.hdf5\" >>> mp = ColloidOutput.ModelPlot(hdf)", "------- :return: data <varies> \"\"\" return self.__hdf.get_data_by_path(path) def plot(self, key, ax=None, masked=False, *args,", "hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine':", "\"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User", "filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied') reader =", "elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif 
line.startswith('Continuous'): t = line.split()", "strip_idx = idx seq = True else: pass else: seq = False strip_idx", "**kwargs: matplotlib plotting kwargs \"\"\" # todo: create a function_fmt for axis options", ":return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return:", "2 * np.sqrt(np.pi * D * t ** 3) eq2 = -(R *", "eq0 = (A * L * np.sqrt(R)) eq1 = 2 * np.sqrt(np.pi *", "\"\"\" Hdf array plotting using Hdf5Reader keys Parameters: ---------- :param str key: valid", "key\".format(key)) if key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh =", "self.resolution = 0 self.xlen = 0 self.ylen = 0 self.ux = 0 self.uy", "* np.exp(eq2 / eq3) x[0] = 0 return x def solve_van_genuchten_1986(self, D=0.01, R=0.01,", "list of valid hdf5 data keys \"\"\" return [i for i in Hdf5Reader.data_paths]", "**kwargs) def plot_histogram(self, nbin=10, width=0.01, *args, **kwargs): \"\"\" User method to plot a", "if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'):", "plot data into a matplotlib chart. Parameters: ---------- :param bool time: if true", "ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return", "path Parameters: ---------- :param str path: hdf5 directory path to data Returns: -------", ":param bool normalize: flag to calculate pdf by residence time or end time", "= self.ylen * self.resolution v = self.uy t = self.bt['nts'].as_matrix() * self.timestep bt", "precise velocity measurement Parameters: ---------- :param str filename: endpoint file name \"\"\" def", "if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else:", "bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return least_squares(self.__van_genuchten_residuals, x0, args=(l,", "**kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot pdf data", "[] lower_v = self.min - adjuster upper_v = 0 for upper_v in bins:", "path \"\"\" return self.__hdf5.get_data_by_path(path) def plot(self, key, *args, **kwargs): \"\"\" Plotting method for", "'bouyancy'): raise KeyError('{}: key not valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine',", "reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0])", "times \"\"\" strip_idx = None seq = False bt = False for idx,", "self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays", "Class needs to be rebuilt to work with timeseries and pathline files for", "self.uy = reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve", "max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts", "**kwargs: matplotlib kwargs for 1d plotting \"\"\" pv_factor = self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor", "files for LB-Colloids Parameters: 
---------- :param str filename: colloid model output filename (ie.", "return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation of colloid velocities \"\"\"", "\"\"\" def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or", "references for clearer examples! from scipy.optimize import leastsq, minimize, least_squares a = self.ncol", "return pdf - self.__jury_1991(vars, A, L, t, v) def __jury_1991(self, vars, A, L,", "if lower_nts < row['end-ts'] <= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts =", "'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf')", "import LogNorm if ax is None: ax = plt.gca() if key not in", "for idx, value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def", "ts = [] ncols = [] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids", "strip_idx = None if strip_idx is not None: pdf = self.pdf['ncol'][:strip_idx + 1]", "Breakthrough(object): \"\"\" Class to prepare and plot breakthrough curve data from endpoint files.", "nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\" Method to create a quiver plot to", "of colloids in simulation \"\"\" def __init__(self, filename): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint", "key not valid for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis =", "np.sqrt(4 * D * R * t) x = 0.5 * special.erfc(eq0/eq1) if", "matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" plt.scatter(self.velocity['colloid'], self.velocity['velocity'], *args, **kwargs)", "Method to read endpoint file data from from ascii files for LB-Colloids Sets", "colcol[center, center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') 
x = x.T[center, center:]", "'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts':", "masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img", "\"\"\" colloid = [] velocity = [] for index, row in self.df.iterrows(): if", "retrieve hdf5 data by specific hdf5 path Parameters: ---------- :param str path: hdf5", "def __init__(self, filename): self.timestep = 0 self.ncol = 0 self.resolution = 0 self.xlen", "pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol),", "self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)])", "file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint data :ivar resolution:", "key == 'col_col': ccx = np.abs(self.__hdf5.get_data('col_col_x')) ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx +", "special D = vars[0] R = vars[1] eq0 = R * l -", "LB-Colloids Sets data to pandas dataframe Parameters: ---------- :param str filename: colloid model", "jury 1991 equation using data Parameters vars: (np.array) [dispersivity, retardation] A: ncol l:", "f: t = [] for idx, line in enumerate(f): if idx < self.__data_startline:", "= kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p", "plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol']", ":return: maximum colloid velocity \"\"\" return self.velocity['velocity'].max() @property def min(self): \"\"\" :return: minimum", "---------- :param int nbin: refinement for quiver plotting :param *args: matplotlib plotting args", "and updated to CDE 
equation Parameters: ---------- :param str filename: ascii output file", ":param str key: valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\"", "2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:] y", "*args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001", "**kwargs: matplotlib plotting kwargs \"\"\" adjuster = 0.00001 bins = np.linspace(self.min - adjuster,", "def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific hdf5 path", "1] else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class ModelPlot(object):", "release in simulation :ivar total_ncol: (int): total number of colloids in simulation \"\"\"", "\"\"\" D = vars[0] R = vars[1] eq0 = (A * L *", "output file name from colloid model :param int nbin: number of timesteps to", ">>> mp = ColloidOutput.ModelPlot(hdf) >>> # model plot accepts matplotlib args and kwargs!!!", "valid keys. 
Use get_data_by_path') hdf = H.File(self.file_name, 'r') if key == 'lb_velocity_x': data", "a quiver plot to display the magnitude and direction of velocity vectors within", "\"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation of colloid velocities", "= (np.array) times \"\"\" strip_idx = None seq = False bt = False", "0.00001 bins = np.linspace(self.min - adjuster, self.max, nbin) ncols = [] velocity =", "None, 'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise", "@property def keys(self): \"\"\" :return: Lattice boltzmann data keys \"\"\" return LBOutput.data_paths.keys() def", "FileTypeError('hdf or hdf5 file must be supplied') self.__hdf = Hdf5Reader(hdf5) @property def keys(self):", "None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None,", "select data from hdf5 file based on key, instead of data path Parameters:", "'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x': 'colloid_colloid/x', 'col_col_y': 'colloid_colloid/y', 'col_col': None,", "(float) ylen v: (float) mean fluid_velocity t: (float) time \"\"\" D = vars[0]", "---------- :param str hdf5: hdf5 file name \"\"\" def __init__(self, hdf5): if not", "\"\"\" strip_idx = None seq = False bt = False for idx, rec", "strip_idx = None seq = False bt = False for idx, rec in", "key: valid dictionary key from self.keys :param object ax: matplotlib pyplot axes object", "args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars, A, L,", "continuous: (int): interval of continuous release, 0 means pulse :ivar ncol: (float): number", "Parameters: ---------- :param str filename: <>.endpoint file name :param int nbin: number of", "self.continuous = int(t[-1].rstrip()) elif line.startswith(\"#\"*10): 
self.__data_startline = idx + 1 break else: pass", "= self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\": arr = self.__hdf.get_data(key)", "return mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs): \"\"\"", "\"\"\" return CCModelPlot.keys def get_data(self, key): \"\"\" Method to return data by key", "elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ #", "def plot(self, *args, **kwargs): \"\"\" Method to plot distribution of velocities by colloid", "\"\"\" if dimensional: x = self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x')", "= reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func =", "= self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False): \"\"\" User method to reset values based", "'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y',", "fluid/solid properties Parameters: ---------- :param str hdf: hdf5 output filename \"\"\" data_paths =", "if line.startswith(\"Timestep\"): t = line.split() self.timestep = float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split()", "int nbin: number of timesteps to bin a pdf for calculation :param bool", "key == \"col_col_y\": x = self.__hdf5.get_data('distance_y') x = x.T[center, center:] y = colcol.T[center,", "kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx, yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs)", "'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 
'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x',", "= bt_colloids.loc[(bt_colloids['end-ts'] >= lower_ts) & (bt_colloids['end-ts'] <= upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t))", "ncol: (float): number of colloids per release in simulation :ivar total_ncol: (int): total", "time=True, *args, **kwargs): \"\"\" Convience method to plot data into a matplotlib chart.", "functionality. ModelPlot and CCModelPlot are useful for visualizing colloid-surface forces and colloid-colloid forces", "normalize: if lower_nts < row['delta-ts'] <= upper_nts: ncol += 1 else: if lower_nts", "= 0 self.uy = 0 self.velocity_factor = 1. self.continuous = 0 self.__data_startline =", "and statistics relating to colloid velocity for a simulation. Class needs to be", "time or end time \"\"\" self.bin = nbin self.__normalize = normalize ts =", "str key: valid data key :param object ax: matplotlib axes object (optional) :param", "'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts':", "'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_x']][()] data =", "x = x[center, center:] # * 1e-6 y = colcol[center, center:] else: x", "nbin: number of timesteps to bin a pdf for calculation \"\"\" def __init__(self,", "None: return mesh else: return ax def plot_velocity_magnitude(self, nbin=10, dimensional=True, masked=False, *args, **kwargs):", "Does it belong here? Finish class. 
Integrate into LB class LBOutput(object): \"\"\" Class", "'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid key\".format(key)) if key == 'col_col':", "path): \"\"\" Method to retrieve hdf5 data by specific hdf5 path Parameters: ----------", "colloid-surface forces and colloid-colloid forces respectively. example import of the Colloid_output.py module is", "(float) time \"\"\" D = vars[0] R = vars[1] eq0 = (A *", "velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of colloid velocities", "def plot(self, time=True, *args, **kwargs): \"\"\" Convience method to plot data into a", "* t x = (eq0 / eq1) * np.exp(eq2 / eq3) x[0] =", "df else: bt_colloids = self.df.loc[self.df['flag'] == 3] bt_colloids = bt_colloids.sort_values('end-ts') ncols = []", "\"\"\" import numpy as np import matplotlib.pyplot as plt import pandas as pd", "of colloids per release in simulation :ivar total_ncol: (int): total number of colloids", "(float): number of colloids per release in simulation :ivar total_ncol: (int): total number", "header from ascii output files for LB-Colloids Parameters: ---------- :param str filename: colloid", "\"\"\" Method to read endpoint file data from from ascii files for LB-Colloids", "* self.timestep)) else: velocity.append((row['y-position'] * self.resolution) / (row['nts'] * self.timestep)) colloid.append(index) arr =", "= reader @property def breakthrough_curve(self): \"\"\" Property method that performs a dynamic calculation", "ncols = [] nts = [] ncol = 0 ncol_per_release = [] for", "if np.isnan(x[0]): x[0] = 0 return x def __prep_data(self): \"\"\" Prepares breakthrough data", "is time, false is nts :param *args: matplotlib args for 1d charts :param", "timestep, or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position':", "<varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data", 
"__get_velocity_array(self): \"\"\" Built in method to calculate the mean velocity of each colloid", "ncols, width, *args, **kwargs) # todo: think about this one. Does it belong", "np.ma.masked_where(img == 1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin,", "of endpoint data :ivar resolution: (float): model resolution :ivar timestep: (float): model timestep", "to return data by key Parameters: ---------- :param str key: valid model key", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if key not in ('col_col_x',", "var(self): \"\"\" :return: variance of colloid velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self):", "dictionary key from self.keys :param object ax: matplotlib pyplot axes object (optional) :param", "self.timestep = reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol", "in self.df.iterrows(): if np.isnan(row['y-position']): velocity.append((self.ylen * self.resolution) / (row['delta-ts'] * self.timestep)) else: velocity.append((row['y-position']", "*args, **kwargs): \"\"\" Convience method to plot data into a matplotlib chart. 
Parameters:", "= None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf", "else: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'],", "def get_data_by_path(self, path): \"\"\" Method to return data by hdf5 path Parameters: ----------", "plotting \"\"\" pv_factor = self.pore_volume_conversion() if self.continuous: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve['ncpr']", "@property def keys(self): return self.__hdf.keys def get_data(self, key): \"\"\" Get data method to", "[] ncols = [] lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag']", "def __init__(self, filename, nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('<>.endpoint file must be supplied')", "by specific hdf5 path Parameters: ---------- :param str path: hdf5 directory path to", "calculation of Dispersivity and Retardation Parameters vars: (np.array) [dispersivity, retardation] A: ncol l:", "method! look up references for clearer examples! 
from scipy.optimize import leastsq, minimize, least_squares", "= None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to calculate the mean", "= self.__hdf.get_data('velocity_x') y = self.__hdf.get_data('velocity_y') else: x = self.__hdf.get_data('lb_velocity_x') y = self.__hdf.get_data('lb_velocity_y') xx", "self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'):", ":return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\" :return: mean", ":param **kwargs: scipy least squares kwargs Returns: ------- :return: scipy least squares dictionary.", "'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y':", ":param bool normalize: method to calculate pdf by residence time or end time", "endpoint, timestep, or pathline) \"\"\" with open(filename) as f: for idx, line in", "colcol.shape center = shape[0] // 2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x')", "= self.__hdf.get_data('lb_velocity_y') xx = np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy =", "\"\"\" Property method to return valid keys to obtain data \"\"\" return CCModelPlot.keys", "path): \"\"\" Method to retrieve hdf5 data by specific path Parameters: ---------- :param", "1]) plt.xlim([0, max(self.breakthrough_curve['nts'] * pv_factor * self.timestep)]) class DistributionFunction(object): \"\"\" Class to plot", "end time \"\"\" self.bin = nbin self.__normalize = normalize ts = [] ncols", "qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0)", "str hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x': 
'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col':", "= float(t[-1].rstrip()) elif line.startswith('uy'): t = line.split() self.uy = float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t", "Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float) mean fluid", "data = self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return data class ASCIIReader(object):", "this call with something from the header later! self.ncol = reader.ncol self.total_ncol =", "self.pore_volume_conversion() plt.plot(self.pdf['nts'] * pv_factor * self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) class ADE(object):", "= mesh[center, center + 1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] +", "velocity and statistics relating to colloid velocity for a simulation. Class needs to", "pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\" Class", "\"\"\" Class to anaylze LB fluid/solid properties Parameters: ---------- :param str hdf: hdf5", "scipy.optimize import leastsq, minimize, least_squares a = self.ncol l = self.ylen * self.resolution", "Plotting method for 2d representation of colloid-colloid dlvo profiles. Parameters: ---------- :param str", "center = shape[0] // 2 mesh[center, center] = mesh[center, center + 1] xx,", "'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y', 'distance_array', 'edl_fine', 'attractive_fine', 'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data", "@property def min(self): \"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def", "yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() > 10: vmin", "= False for idx, rec in enumerate(self.pdf): if not bt: if rec['ncol'] !=", "bins for plotting :param float width: matplotlib bar width. 
:param *args: matplotlib plotting", "str key: valid model key \"\"\" return self.__hdf5.get_data(key) def get_data_by_path(self, path): \"\"\" Method", "for index, row in pdf_colloids.iterrows(): if normalize: if lower_nts < row['delta-ts'] <= upper_nts:", "yy) if masked: img = self.__hdf.get_data('image') xx = np.ma.masked_where(img == 1, a=xx) yy", "get_data(self, key): \"\"\" Method to select data from hdf5 file based on key,", "to create a quiver plot to display the magnitude and direction of velocity", "* L - v * t) ** 2 eq3 = 4 * R", "[] for idx, line in enumerate(f): if idx < self.__data_startline: pass elif idx", "self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in method to calculate the", "< self.__data_startline: pass elif idx == self.__data_startline: self.__header = [i.rstrip() for i in", "hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data = data[0] elif key == 'dlvo_fine': data =", "if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file must be", "self.ux = 0 self.uy = 0 self.velocity_factor = 1. 
self.continuous = 0 self.__data_startline", "else: arr = self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img ==", "data key :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\"", "'lewis_y', 'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key)", "min(self): \"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self): \"\"\"", "'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y': data = hdf[Hdf5Reader.data_paths[key]][()][0] elif key", "def min(self): \"\"\" :return: minimum colloid velocity \"\"\" return self.velocity['velocity'].min() @property def mean(self):", "1, a=y) Q = plt.quiver(xx[::nbin, ::nbin], yy[::nbin, ::nbin], x[::nbin, ::nbin], y[::nbin, ::nbin], units='width',", "**kwargs): \"\"\" Convience method to plot data into a matplotlib chart. Parameters: ----------", "*args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys Parameters: ---------- :param str", "name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x', 'distance_y':", "self.timestep, self.pdf['ncol'] / self.ncol, *args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol,", "dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float, 'x-model':", "('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename)) else: self.read_header(filename) self.df =", "distribution function based upon user supplied bin size. Parameters: ---------- :param int nbin:", "squares for van genuchten 1986. Miscable displacement. 
Parameters: ---------- :param float D: Diffusivity", "self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self):", "reader = ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol", "* pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) plt.xlim([0, max(self.breakthrough_curve['nts']", "elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split()", "self.__data_startline: pass elif idx == self.__data_startline: self.__header = [i.rstrip() for i in line.split()", "\"\"\" Prepares breakthrough data by stripping off trailing zeros. Returns: pdf = (np.array)", "based output files <endpoint, timestep, pathline> to a pandas dataframe Parameters: ---------- :param", "accepts matplotlib args and kwargs!!! 
>>> mp.plot('edl_x', cmap='viridis') >>> plt.show() \"\"\" import numpy", "arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key not", "if not seq: strip_idx = idx seq = True else: pass else: seq", "in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y', 'col_col_fine_x', 'col_col_fine_y'): raise KeyError(\"{} is not a valid", "plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough data with", "number of colloids per release in simulation :ivar total_ncol: (int): total number of", "data Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths =", "R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least squares for", "key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center,", "ccy = np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key == 'col_col_fine': ccx", "dictionary key from self.keys Returns: ------- :return: data <varies> \"\"\" if key not", "v: (float) mean fluid velocity t: (float) time bt: (np.array) breakthrough curve \"\"\"", "float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize =", "to calculate macroscopic advection dispersion equation parameters for field scale model parameterization Class", ":return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve", "= 0 self.velocity_factor = 1. 
self.continuous = 0 self.__data_startline = 0 self.__header =", "matplotlib args for 1d charts :param **kwargs: matplotlib keyword arguments for 1d charts", "v = self.uy t = self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol", "key == 'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()]", "updated to CDE equation Parameters: ---------- :param str filename: ascii output file name", "path): \"\"\" Method to return data by hdf5 path Parameters: ---------- :param str", "plotting kwargs \"\"\" # todo: create a function_fmt for axis options mesh =", "to calculate the mean velocity of each colloid in the simulation \"\"\" colloid", "plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and", "file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x', 'col_col_y': 'colloidcolloid/y', 'col_col': None, 'distance_x': 'colloid_colloid/distance/x',", "self.uy pdf, t = self.__prep_data() x0 = np.array([D, R]) return least_squares(self.__jury_residuals, x0, args=(a,", "van genuchten 1986. Miscable displacement. 
Parameters: ---------- :param float D: Diffusivity initial guess.", "self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in", "---------- :param str hdf: hdf5 output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y':", "timestep, or pathline) \"\"\" with open(filename) as f: t = [] for idx,", "'distance_fine'): data = hdf[Hdf5Reader.data_paths[key]][()][0] else: data = hdf[Hdf5Reader.data_paths[key]][()] hdf.close() return data def get_data_by_path(self,", "None: ax = plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y', 'edl_x', 'edl_y',", "0 :param float ftol: scipy function tolerance for solution :param int max_nfev: maximum", "df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation", ">>> from lb_colloids import ColloidOutput >>> import matplotlib.pyplot as plt >>> >>> hdf", "np.abs(self.__hdf5.get_data('col_col_y')) mesh = ccx + ccy elif key == 'col_col_fine': ccx = np.abs(self.__hdf5.get_data('col_col_fine_x'))", "self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args,", "least squares dictionary. Answer in dict['x'] \"\"\" # todo: test this method! 
look", "* self.resolution v = self.uy pdf, t = self.__prep_data() x0 = np.array([D, R])", "None, 'conversion_factor': None, 'reynolds_number': None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\", "if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x = x[center, center:] y =", "'results/velocity_factor', 'pore_diameter': 'results/pore_diameter', 'porosity': 'results/porosity', 'reynolds_number': 'results/reynolds_number', 'brownian_x': 'colloids/brownian/x', 'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x',", "filename): \"\"\" Method to read the header from ascii output files for LB-Colloids", "file must be supplied') reader = ASCIIReader(filename) self.df = reader.df self.resolution = reader.resolution", "plot(self, time=True, *args, **kwargs): \"\"\" Convience method to plot data into a matplotlib", "self.bt = Breakthrough(filename).breakthrough_curve self.reset_pdf(nbin) def __reset(self): self.pdf = self.__dist_func.pdf def reset_pdf(self, nbin, normalize=False):", "with open(filename) as f: t = [] for idx, line in enumerate(f): if", "vmin = kwargs.pop('vmin') if 'vmax' in kwargs: vamx = kwargs.pop('vmax') p = ax.pcolormesh(xx,", "output filename \"\"\" data_paths = {'velocity_x': None, 'velocity_y': None, 'lb_velocity_x': None, 'lb_velocity_y': None,", "plot distribution of velocities by colloid for array of velocity. Parameters ---------- :param", "time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self, D=0.01,", "for visualizing colloid-surface forces and colloid-colloid forces respectively. 
example import of the Colloid_output.py", "row['delta-ts'] <= upper_nts: ncol += 1 else: if lower_nts < row['end-ts'] <= upper_nts:", "'edl_x', 'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis,", "---------- :param str key: valid dictionary key from self.keys Returns: ------- :return: data", "data = self.__hdf5.get_data(key) return data class ASCIIReader(object): \"\"\" Class to read in text", "'distance_y': 'colloid_colloid/distance/y', 'distance_fine_x': 'colloid_colloid/fine/distance/x', 'distance_fine_y': 'colloid_colloid/fine/distance/y', 'col_col_fine_x': 'colloid_colloid/fine/x', 'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def", "plot(self, *args, **kwargs): \"\"\" Method to plot distribution of velocities by colloid for", "[center], 'ko') return p class ColloidVelocity(object): \"\"\" Method to return colloid velocity and", "= 0 for upper_v in bins: ncol = 0 for v in self.velocity['velocity']:", "changing the pdf bin values Parameters: ---------- :param int nbin: number of timesteps", "v, t, bt): \"\"\" Method to estimate residuals from vanGenuchten and Winerega 1986", "lower_nts = 0 max_ts = self.df['nts'].max() pdf_colloids = self.df.loc[self.df['flag'] == 3] pdf_colloids =", "1] xx, yy = np.meshgrid(np.arange(0, mesh.shape[0]+1), np.arange(0, mesh.shape[1] + 1)) if mesh.max()/mesh.min() >", "calculation conversion for plotting colloids. \"\"\" pv_factor = (abs(self.__reader.uy) * self.__reader.velocity_factor) /\\ (self.__reader.ylen", "mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center = mesh.shape[0] / 2. ax.plot([center],", "Reader object to read in HDF5 stored outputs from colloid models. 
Contains a", "time class ModelPlot(object): \"\"\" Class to retrieve Colloid force arrays and plot for", "'colloid_colloid/fine/y', 'col_col_fine': None, 'edl_fine': 'colloids/edl_fine', 'attractive_fine': 'colloids/attractive_fine', 'dlvo_fine': None, 'distance_fine': 'colloids/distance_fine'} def __init__(self,", "key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif", "= \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data = self.__hdf5.get_data(key) return data", "values based on changing the pdf bin values Parameters: ---------- :param int nbin:", "plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" # todo: create a function_fmt", "None} def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or", "t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\"", "here? Finish class. 
Integrate into LB class LBOutput(object): \"\"\" Class to anaylze LB", "'dlvo_y': data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_y']][()] # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\ # hdf[Hdf5Reader.data_paths['lvdw_y']][()] data =", "read in text based output files <endpoint, timestep, pathline> to a pandas dataframe", "least_squares l = self.ylen * self.resolution v = self.uy t = self.bt['nts'].as_matrix() *", "data to pandas dataframe Parameters: ---------- :param str filename: colloid model output filename", "*args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough", "l - v * t eq1 = np.sqrt(4 * D * R *", "else: pdf = self.pdf['ncol'] time = self.pdf['nts'] return pdf, time class ModelPlot(object): \"\"\"", "ADE(object): \"\"\" Class to calculate macroscopic advection dispersion equation parameters for field scale", "Returns: ------- :return: scipy least squares dictionary. Answer in dict['x'] \"\"\" from scipy.optimize", "for plotting'.format(key)) elif key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr =", "def __init__(self, hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5", "args=(l, v, t, bt), ftol=ftol, max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t,", "4 * R * D * t x = (eq0 / eq1) *", "str filename: <>.endpoint file Attributes: ---------- :ivar df: (pandas DataFrame): dataframe of endpoint", "profiles Parameters: ---------- :param str key: valid data key :param *args: matplotlib plotting", "plotting :param float width: matplotlib bar width. 
:param *args: matplotlib plotting args :param", "None, 'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None,", "= arr def pore_volume_conversion(self): \"\"\" Method to retrieve the pore volume calculation conversion", "kwargs Returns: ------- :return: scipy least squares dictionary. Answer in dict['x'] \"\"\" #", "1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v: (float) mean", "x[center, center:] y = colcol[center, center:] elif key == \"col_col_y\": x = self.__hdf5.get_data('distance_y')", "np.float, 'y-position': np.float, 'x-model': np.float, 'y-model': np.float, 'start-ts': np.int, 'end-ts': np.int, 'delta-ts': np.int,", "yy, mesh, norm=LogNorm(vmin=mesh.min(), vmax=mesh.max()), *args, **kwargs) else: p = ax.pcolormesh(xx, yy, mesh, *args,", "= self.__hdf.get_data(key) if masked: img = self.__hdf.get_data(\"image\") arr = np.ma.masked_where(img == 1, a=arr)", "a histogram of velocities using a bar chart. Parameters: ---------- :param int nbin:", "zeros. Returns: pdf = (np.array) stripped pdf t = (np.array) times \"\"\" strip_idx", "else: mesh = self.__hdf5.get_data(key) # find center and set to nearby value to", "key in ('dlvo_fine', 'edl_fine', 'attractive_fine'): x_axis = self.__hdf.get_data('distance_fine') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr,", "np.std(self.velocity['velocity']) @property def cv(self): \"\"\" :return: coeficient of variance of colloid velocities \"\"\"", "key == \"col_col_fine_x\": x = self.__hdf5.get_data('distance_fine_x') x = x[center, center:] # * 1e-6", "Parameters ---------- :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\"", "== 3] pdf_colloids = pdf_colloids.sort_values('delta-ts') for upper_nts in range(0, int(max_ts) + 1, nbin):", "plt.bar(velocity, ncols, width, *args, **kwargs) # todo: think about this one. 
Does it", "= self.__hdf5.get_data('distance_x') x = x[center, center:] y = colcol[center, center:] elif key ==", "crashing shape = mesh.shape center = shape[0] // 2 mesh[center, center] = mesh[center,", "The Colloid_output module contains classes to read LB Colloids simulation outputs and perform", "line.startswith('ylen'): t = line.split() self.ylen = float(t[-1].rstrip()) elif line.startswith('ux'): t = line.split() self.ux", "max_nfev=max_nfev, **kwargs) def __van_genuchten_residuals(self, vars, l, v, t, bt): \"\"\" Method to estimate", "**kwargs) class ADE(object): \"\"\" Class to calculate macroscopic advection dispersion equation parameters for", "supplied') self.__hdf5 = Hdf5Reader(hdf5) @property def keys(self): \"\"\" Property method to return valid", "int nbin: number of bins for pdf calculation Attributes: ---------- :ivar df: (pandas", "\"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous: bt_colloids =", "ncol += 1 else: if lower_nts < row['end-ts'] <= upper_nts: ncol += 1", "is None: ax = plt.gca() if key not in ('col_col', 'col_col_fine', 'col_col_x', 'col_col_y',", "specific hdf5 path Parameters: ---------- :param str path: hdf5 directory path to data", "for 1d charts \"\"\" if time: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] /", "return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method to retrieve hdf5 data by specific", ":param str key: valid dictionary key from self.keys :param object ax: matplotlib pyplot", "1d charts \"\"\" if time: if self.__normalize: plt.plot(self.pdf['nts'] * self.timestep, self.pdf['ncol'] / self.total_ncol,", "np.abs(self.__hdf5.get_data('col_col_fine_x')) ccy = np.abs(self.__hdf5.get_data('col_col_fine_y')) mesh = ccx + ccy else: mesh = self.__hdf5.get_data(key)", "\"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int, 'x-position': np.float, 'y-position': np.float,", "as f: t 
= [] for idx, line in enumerate(f): if idx <", "valid dictionary key from self.keys :param object ax: matplotlib pyplot axes object (optional)", "ftol: scipy function tolerance for solution :param int max_nfev: maximum number of function", "L, t, v, pdf): \"\"\" Method to estimate residuals from jury 1991 equation", "to plot pdf data with pore volumes (non-dimensional time) Parameters: ---------- :param *args:", "else: p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center", "quiver plot to display the magnitude and direction of velocity vectors within the", "mean colloid velocity \"\"\" return self.velocity['velocity'].mean() @property def var(self): \"\"\" :return: variance of", "/ self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.ncol, *args, **kwargs) plt.ylim([0, 1])", "= float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False self.__reader", "class DistributionFunction(object): \"\"\" Class to plot a probablity distribution function of colloid breakthrough", "t = (np.array) times \"\"\" strip_idx = None seq = False bt =", "data as 1d or as a meshgrid object More sophisticated than standard ModelPlot", "elif line.startswith(\"#\"*10): self.__data_startline = idx + 1 break else: pass def read_ascii(self, filename):", "file must be supplied') self.file_name = hdf5 @property def keys(self): \"\"\" :return: list", "+ \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0] elif key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y',", "arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif key == \"image\": arr =", "= {self.__header[idx]: data for idx, data in enumerate(temp)} df = pd.DataFrame(temp) df =", "= np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked: img = self.__hdf.get_data('image')", "data in enumerate(temp)} df = pd.DataFrame(temp) df = 
df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid')", "per release in simulation :ivar total_ncol: (int): total number of colloids in simulation", "reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep self.velocity", "reader.timestep self.resolution = reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0])", "colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value in enumerate(colloid):", "files for LB-Colloids Sets data to pandas dataframe Parameters: ---------- :param str filename:", "np.ma.masked_where(img == 1, a=xx) yy = np.ma.masked_where(img == 1, a=yy) x = np.ma.masked_where(img", "int nbin: number of time steps to base bin on :param bool normalize:", "DistributionFunction(object): \"\"\" Class to plot a probablity distribution function of colloid breakthrough from", "to read in text based output files <endpoint, timestep, pathline> to a pandas", "forces and colloid-colloid forces respectively. example import of the Colloid_output.py module is as", "dataframe Parameters: ---------- :param str filename: colloid model output filename (ie. 
endpoint, timestep,", "'gravity': 'colloids/gravity', 'bouyancy': 'colloids/bouyancy', 'ionic': 'colloids/chemical_dict/I', 'distance_array': 'colloids/distance_arr', 'dlvo_x': None, 'dlvo_y': None, 'col_col_x':", "1] time = self.pdf['nts'][:strip_idx + 1] else: pdf = self.pdf['ncol'] time = self.pdf['nts']", "More sophisticated than standard ModelPlot Parameters: ---------- :param str hdf5: hdf5 file name", "pdf: (np.recarray) colloid probability distribution function \"\"\" def __init__(self, filename, nbin=1000): if not", "elif key == 'dlvo_fine': data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\ hdf[Hdf5Reader.data_paths['attractive_fine']][()] data = data[0]", "Winerega 1986 to calculate Dispersivity and Retardation from breakthrough data. Parameters: vars: (np.array)", "= None self.reset_pdf(nbin) self.__normalize = False self.__reader = reader def reset_pdf(self, nbin, normalize=False):", "dlvo profiles Parameters: ---------- :param str key: valid data key :param *args: matplotlib", "reader.continuous self.ncol = float(reader.ncol) self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf = None", "'col_col_fine_y': 'colloid_colloid/fine/y', 'col_col_fine': None} def __init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'):", "CCModelPlot are useful for visualizing colloid-surface forces and colloid-colloid forces respectively. 
example import", "= ASCIIReader(filename) self.timestep = reader.timestep self.resolution = reader.resolution self.xlen = reader.xlen self.ylen =", "residence time or end time \"\"\" self.__dist_func.reset_pdf(nbin, normalize) self.pdf = self.__dist_func.pdf def solve_jury_1991(self,", "curve data \"\"\" max_ts = self.df['nts'].max() if self.__breakthrough_curve is None: if not self.continuous:", "= self.ylen * self.resolution v = self.uy pdf, t = self.__prep_data() x0 =", "t, bt): \"\"\" Method to estimate residuals from vanGenuchten and Winerega 1986 Parameters:", "p = ax.pcolormesh(xx, yy, mesh, *args, **kwargs) ax.set_ylim([0, mesh.shape[0]]) ax.set_xlim([0, mesh.shape[1]]) center =", "todo: create a function_fmt for axis options mesh = None if ax is", "!= 0: bt = True else: pass else: if rec['ncol'] == 0: if", "access data Parameters: ---------- :param str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths", "* t ** 3) eq2 = -(R * L - v * t)", "ncols, 'ncpr': ncol_per_release}).set_index('ncol') self.__breakthrough_curve = df return self.__breakthrough_curve def pore_volume_conversion(self): \"\"\" Method to", "* self.timestep)) colloid.append(index) arr = np.recarray(len(colloid,), dtype=[('colloid', np.int), ('velocity', np.float)]) for idx, value", "boltzmann data key Returns: ------- :return: data \"\"\" if key in (\"velocity_x\", \"velocity_y\"):", "int(t[-1].rstrip()) elif line.startswith('Resolution'): t = line.split() self.resolution = float(t[-1].rstrip()) elif line.startswith('xlen'): t =", "residuals from vanGenuchten and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float)", "float(reader.df.shape[0]) self.uy = reader.uy self.pdf = None self.__dist_func = DistributionFunction(filename, nbin) self.bt =", "H.File(self.file_name, 'r') if key == 'lb_velocity_x': data = hdf[Hdf5Reader.data_paths[key]][()][1] elif key == 'lb_velocity_y':", "self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 
'ncol': ncols, 'ncpr': ncol_per_release}).set_index('ncol')", "np.ma.masked_where(img == 1, a=arr) mesh = ax.imshow(arr, *args, **kwargs) if mesh is not", "Parameters: ---------- :param str hdf5: hdf5 file name \"\"\" data_paths = {'col_col_x': 'colloidcolloid/x',", "*args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep, self.breakthrough_curve['ncpr'] / float(self.ncol), *args, **kwargs)", "as np import matplotlib.pyplot as plt import pandas as pd import h5py as", "<= upper_nts: ncol += 1 ts.append(upper_nts) ncols.append(ncol) lower_nts = upper_nts arr = np.recarray((len(ts),),", "* self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs) else: if self.continuous: plt.plot(self.breakthrough_curve['nts'] * self.timestep,", "str filename: endpoint file name \"\"\" def __init__(self, filename): if not filename.endswith(\".endpoint\"): raise", "respectively. example import of the Colloid_output.py module is as follows >>> from lb_colloids", "pass else: if rec['ncol'] == 0: if not seq: strip_idx = idx seq", "= reader.continuous # todo: replace this call with something from the header later!", "self.__normalize = normalize ts = [] ncols = [] lower_nts = 0 max_ts", "'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor', 'pore_diameter': 'results/pore_diameter',", ":param str hdf5: LB-Colloid hdf5 file name \"\"\" data_paths = {'ac': \"colloids/model_dict/ac\", 'image':", "*args, **kwargs) else: plt.plot(self.breakthrough_curve['nts'] * pv_factor * self.timestep, self.breakthrough_curve.index.values / float(self.ncol), *args, **kwargs)", "Colloid_output module contains classes to read LB Colloids simulation outputs and perform post", "hdf5): if not hdf5.endswith('.hdf') and not\\ hdf5.endswith('.hdf5'): raise FileTypeError('hdf or hdf5 
file must", "key, ax=None, masked=False, *args, **kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys Parameters:", "from self.keys :param object ax: matplotlib pyplot axes object (optional) :param *args: matplotlib", "to return valid keys to obtain data \"\"\" return CCModelPlot.keys def get_data(self, key):", "Parameters: ---------- :param str key: lattice boltzmann data key Returns: ------- :return: data", "*args, **kwargs) else: if self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'],", "reader.resolution self.ylen = reader.ylen self.ncol = reader.ncol self.total_ncol = float(reader.df.shape[0]) self.uy = reader.uy", "if mesh is not None: return mesh else: return ax def plot_velocity_magnitude(self, nbin=10,", "length v: (float) mean fluid velocity t: (float) time bt: (np.array) breakthrough curve", "ax.plot(x_axis, arr, *args, **kwargs) elif key in ('conversion_factor', 'gravity', 'bouyancy'): raise KeyError('{}: key", "if filename.split('.')[-1] not in ('endpoint', 'timeseries', 'pathline'): raise FileTypeError(\"{}: not in supported filetypes\".format(filename))", "Class to plot a probablity distribution function of colloid breakthrough from endpoint files.", "endpoint, timestep, or pathline) \"\"\" dtypes = {'colloid': np.int, 'flag': np.int, 'nts': np.int,", "Returns: ------- :return: data <varies> \"\"\" return self.__hdf.get_data(key) def get_data_by_path(self, path): \"\"\" Method", "x = np.ma.masked_where(img == 1, a=x) y = np.ma.masked_where(img == 1, a=y) Q", "= 0 for index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol))", "self.bt['nts'].as_matrix() * self.timestep bt = self.bt['ncpr'].as_matrix() / self.ncol x0 = np.array([D, R]) return", "t ** 3) eq2 = -(R * L - v * t) **", ":param str key: valid dictionary key from self.keys Returns: ------- :return: data <varies>", "plt.plot(x, y * -1, *args, 
**kwargs) def plot_mesh(self, key, ax=None, *args, **kwargs): \"\"\"", "__init__(self, hdf5): if not hdf5.endswith('hdf') and\\ not hdf5.endswith('hdf5'): raise FileTypeError('hdf or hdf5 file", "todo: test this method! look up references for clearer examples! from scipy.optimize import", "**kwargs): \"\"\" Hdf array plotting using Hdf5Reader keys Parameters: ---------- :param str key:", "not None: pdf = self.pdf['ncol'][:strip_idx + 1] time = self.pdf['nts'][:strip_idx + 1] else:", "as 1d or as a meshgrid object More sophisticated than standard ModelPlot Parameters:", "hdf5 path Parameters: ---------- :param str path: valid HDF5 data path \"\"\" return", "Dispersivity and Retardation from breakthrough data. Parameters: vars: (np.array) [dispersivity, retardation] x: (float)", "'brownian_y': 'colloids/brownian/y', 'lvdw_x': 'colloids/lvdw/x', 'lvdw_y': 'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y':", "upper_ts)] ncol += 1 ncols.append(float(ncol)) ncol_per_release.append(len(t)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) ncol_per_release.append(len(bt_colloids.loc[(bt_colloids['end-ts'] >= max_ts -", "to retrieve hdf5 data by specific path Parameters: ---------- :param str path: hdf5", "__init__(self, filename): if not filename.endswith(\".endpoint\"): raise FileTypeError('.endpoint file must be supplied') reader =", "the mean velocity of each colloid in the simulation \"\"\" colloid = []", "a probablity distribution function of colloid breakthrough from endpoint files. Parameters: ---------- :param", "== 0: if not seq: strip_idx = idx seq = True else: pass", "plot for data analysis. 
Parameters: ---------- :param str hdf5: hdf5 file name \"\"\"", "pyplot axes object (optional) :param *args: matplotlib plotting args :param **kwargs: matplotlib plotting", "velocities \"\"\" return np.var(self.velocity['velocity']) @property def stdev(self): \"\"\" :return: standard deviation of colloid", ":param *args: matplotlib args for 1d plotting :param **kwargs: matplotlib kwargs for 1d", ":param *args: matplotlib plotting args :param **kwargs: matplotlib plotting kwargs \"\"\" if key", "= pd.DataFrame(temp) df = df.reindex_axis(self.__header, axis=1) df = df.set_index('colloid') return df @staticmethod def", ":param **kwargs: matplotlib keyword arguments for 1d charts \"\"\" if time: if self.__normalize:", "CCModelPlot(object): \"\"\" Class to query colloid-colloid interactions and plot data as 1d or", "for idx, value in enumerate(colloid): arr[idx] = tuple([value, velocity[idx]]) self.velocity = arr @property", "LB fluid/solid properties Parameters: ---------- :param str hdf: hdf5 output filename \"\"\" data_paths", "bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df = pd.DataFrame({'nts': nts, 'ncol':", "Class to query colloid-colloid interactions and plot data as 1d or as a", "arrays and plot for data analysis. 
Parameters: ---------- :param str hdf5: hdf5 file", "value in enumerate(ts): arr[idx] = tuple([value, ncols[idx]]) self.pdf = arr def pore_volume_conversion(self): \"\"\"", "and Winerega 1986 Parameters: vars: (np.array) [dispersivity, retardation] x: (float) column length v:", "'colloids/lvdw/y', 'edl_x': 'colloids/edl/x', 'edl_y': 'colloids/edl/y', 'attractive_x': 'colloids/attractive/x', 'attractive_y': 'colloids/attractive/y', 'lewis_x': 'colloids/lewis_acid_base/x', 'lewis_y': 'colloids/lewis_acid_base/y',", "= self.__hdf5.get_data(\"conversion_factor\") key = \"lb_{}\".format(key) data = self.__hdf5.get_data(key) * factor else: data =", "bt_colloids.iterrows(): lower_ts = row['end-ts'] - self.continuous upper_ts = row['end-ts'] t = bt_colloids.loc[(bt_colloids['end-ts'] >=", "Parameters: ---------- :param int nbin: refinement for quiver plotting :param *args: matplotlib plotting", ":param float ftol: scipy function tolerance for solution :param int max_nfev: maximum number", "nbin=1000): if not filename.endswith('.endpoint'): raise FileTypeError('.endpoint file must be supplied') reader = ASCIIReader(filename)", "for index, row in bt_colloids.iterrows(): ncol += 1 ncols.append(float(ncol)) nts.append(row['end-ts']) ncols.append(float(ncol)) nts.append(max_ts) df", "= float(t[-1].rstrip()) elif line.startswith(\"Ncols\"): t = line.split() self.ncol = int(t[-1].rstrip()) elif line.startswith('Resolution'): t", "rec['ncol'] != 0: bt = True else: pass else: if rec['ncol'] == 0:", "line.startswith(\"#\"*10): self.__data_startline = idx + 1 break else: pass def read_ascii(self, filename): \"\"\"", "kwargs \"\"\" from matplotlib.colors import LogNorm if ax is None: ax = plt.gca()", "endpoint data :ivar resolution: (float): model resolution :ivar timestep: (float): model timestep :ivar", "= float(t[-1].rstrip()) elif line.startswith('velocity_factor'): t = line.split() self.velocity_factor = float(t[-1].rstrip()) elif line.startswith('Continuous'): t", "else: if 
self.__normalize: plt.plot(self.pdf['nts'], self.pdf['ncol'] / self.total_ncol, *args, **kwargs) else: plt.plot(self.pdf['nts'], self.pdf['ncol'] /", "'lb_velocity_x': None, 'lb_velocity_y': None, 'resolution': None, 'porosity': None, 'pore_diameter': None, 'conversion_factor': None, 'reynolds_number':", "\"colloids/model_dict/ac\", 'image': 'Binary_image', 'lb_velocity_x': 'results/uarray', 'lb_velocity_y': 'results/uarray', 'lb_mean_velocity_x': 'results/mean_ux', 'lb_mean_velocity_y': 'results/mean_uy', 'conversion_factor': 'results/velocity_factor',", "float(self.df.shape[0]) self.__breakthrough_curve = None self.__reader = reader @property def breakthrough_curve(self): \"\"\" Property method", "**kwargs): \"\"\" Method to create a quiver plot to display the magnitude and", "object ax: matplotlib pyplot axes object (optional) :param *args: matplotlib plotting args :param", "0.5 * special.erfc(eq0/eq1) if np.isnan(x[0]): x[0] = 0 return x def __prep_data(self): \"\"\"", "float(self.ncol), *args, **kwargs) plt.ylim([0, 1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot", "mesh.shape[1]]) center = mesh.shape[0] / 2. 
ax.plot([center], [center], 'ko') return p class ColloidVelocity(object):", "**kwargs) qk = plt.quiverkey(Q, 0.9, 0.9, 0.01, r'$1 \\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0],", "calculate the mean velocity of each colloid in the simulation \"\"\" colloid =", "1]) def plot_pv(self, *args, **kwargs): \"\"\" Method to plot breakthrough data with pore", "= max(reader.df['nts']) * self.timestep self.velocity = None self.__get_velocity_array() def __get_velocity_array(self): \"\"\" Built in", "t x = (eq0 / eq1) * np.exp(eq2 / eq3) x[0] = 0", "= 0 self.__data_startline = 0 self.__header = [] if filename.split('.')[-1] not in ('endpoint',", ":return: coeficient of variance of colloid velocities \"\"\" return (self.stdev / self.mean) *", "ax is None: ax = plt.gca() if key in ('lvdw_x', 'lvdw_y', 'lewis_x', 'lewis_y',", "* t) ** 2 eq3 = 4 * R * D * t", "---------- :param bool time: if true x-axis is time, false is nts :param", "D=0.01, R=0.01, ftol=1e-10, max_nfev=1000, **kwargs): \"\"\" Scipy optimize method to solve least sqares", "as H class Breakthrough(object): \"\"\" Class to prepare and plot breakthrough curve data", "hdf[Hdf5Reader.data_paths[key]][()][0] elif key == 'dlvo_x': data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\ hdf[Hdf5Reader.data_paths['attractive_x']][()] # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\", "'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr, *args, **kwargs) elif", "self.total_ncol = float(self.df.shape[0]) self.bin = nbin self.pdf = None self.reset_pdf(nbin) self.__normalize = False", "'edl_y', 'dlvo_x', 'dlvo_y', 'attractive_x', 'attractive_y'): x_axis = self.__hdf.get_data('distance_array') arr = self.__hdf.get_data(key) ax.plot(x_axis, arr,", "\\frac{cm}{s}$', coordinates='figure') plt.xlim(0, x.shape[1]) plt.ylim(x.shape[0], 0) class CCModelPlot(object): \"\"\" Class to query colloid-colloid", "width, *args, 
**kwargs) # todo: think about this one. Does it belong here?", "reader.df self.resolution = reader.resolution self.timestep = reader.timestep self.continuous = reader.continuous # todo: replace", "pdf t = (np.array) times \"\"\" strip_idx = None seq = False bt", "np.arange(0, x.shape[1]) yy = np.arange(0, x.shape[0]) xx, yy = np.meshgrid(xx, yy) if masked:", "- self.continuous) & (bt_colloids['end-ts'] <= max_ts)])) df = pd.DataFrame({'nts': nts, 'ncol': ncols, 'ncpr':", "center = shape[0] // 2 if key == \"<KEY>\": x = self.__hdf5.get_data('distance_x') x", "data from hdf5 file based on key, instead of data path Parameters: ----------", ":param str path: hdf5 directory path to data Returns: ------ :return: data <varies>", "= mesh.shape center = shape[0] // 2 mesh[center, center] = mesh[center, center +", "= reader.ylen self.df = reader.df self.ncol = reader.df.shape[0] self.max_time = max(reader.df['nts']) * self.timestep", "method to reset values based on changing the pdf bin values Parameters: ----------", "least_squares(self.__jury_residuals, x0, args=(a, l, t, v, pdf), ftol=ftol, max_nfev=max_nfev, **kwargs) def __jury_residuals(self, vars,", "*args, **kwargs) if mesh is not None: return mesh else: return ax def", "of continuous release, 0 means pulse :ivar ncol: (float): number of colloids per" ]
[ "def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int", "test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\",", "self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If) if __name__ == '__main__': unittest.main()", "test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1", "x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y)", "test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def", "a(int x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int", "+ 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int)", "def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\", ast.While) def test_if_0(self):", "\"function a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) ->", "test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def 
test_expression_1(self):", "src import parser from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data,", "-> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) ->", "self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function", "10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function)", "% 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If)", "3 * 4 / 5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If)", "+ 2 - 3 * 4 / 5 % 6\", ast.Binop) def test_if_0(self):", "\"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1", "\"1 + 2 - 3 * 4 / 5 % 6\", ast.Binop) def", "list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() ->", "ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) -> int {}\", ast.Function)", "test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_,", "ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If) if __name__", 
"\"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\",", "x, int y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def", "import parser from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_):", "self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self):", "int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) -> int", "2 - 3 * 4 / 5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_,", "self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare) def", "sys import os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src import ast", "+ 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4", "class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\", ast.While)", "def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call,", "def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4 / 5 %", "\"..\")) from src import parser from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self,", "def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def 
test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If) if __name__ ==", "-> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\",", "self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x", "-> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\",", "ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4 / 5", "test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 -", "from src import parser from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func,", "ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def", "\"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def", "self.assertParsesTo(parser.function, \"function a(int x, int y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call,", "from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func),", "test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function", ":= x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def 
test_decl_0(self):", "type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x :=", "\"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 *", "\"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function,", "self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3", "sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src import ast class ParserTestCase(unittest.TestCase): def", "func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x +", "self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall)", "def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function", "test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int", "def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def", "self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function)", "- 3 * 4 / 5 
% 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\",", "class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def", "self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 +", "def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2", "def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list)", "\"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a()", "Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\", ast.While) def", "def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) -> int {}\", ast.Function) def", "self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x)", "a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int", "< 10){x := x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If)", "x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function", "{}\", ast.Function) 
def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def", "self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4 / 5 % 6\", ast.Binop)", "y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call,", "unittest import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src", "10){x := x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def", "def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression,", "def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\",", "def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_,", "5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\",", "4 / 5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self):", "import os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src import ast class", "ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function) def test_function_3(self):", "x + 1;}\", ast.While) def 
test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl,", "a(int x, int y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall)", "parser from src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data,", "test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) -> int {}\", ast.Function) def test_func_call_0(self):", "2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4 /", "{}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x, int y) -> int {}\",", "ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression, \"1 +", "\"function a(int x, int y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\",", "assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x", "test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare)", "ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function,", "func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x <", "\"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, 
\"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self):", "self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int", "6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If) if", ":= 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\",", "* 4 / 5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def", "test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_, \"if(x){}elif(x){}else{}\", ast.If) if __name__ == '__main__':", "\"while(x < 10){x := x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\",", "def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self):", "ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop)", "def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function,", "ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self):", "ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", 
ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\",", "int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall)", "/ 5 % 6\", ast.Binop) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(x){}else{}\", ast.If) def test_if_1(self): self.assertParsesTo(parser.if_,", "a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\", ast.Function) def test_function_2(self):", "int y) -> int {}\", ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self):", "test_expression_1(self): self.assertParsesTo(parser.expression, \"1 + 2 - 3 * 4 / 5 % 6\",", "os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src import ast class ParserTestCase(unittest.TestCase):", "ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self):", "test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function, \"function a() -> int {}\",", "self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return", "{}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function) def", "\"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self): self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def", "data, type_): 
self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x", "\"function a(int x) -> int {}\", ast.Function) def test_function_3(self): self.assertParsesTo(parser.function, \"function a(int x,", "import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from src import", "1;}\", ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x", "type_) class Test_parse_fragment_unit_tests(ParserTestCase): def test_while_0(self): self.assertParsesTo(parser.while_, \"while(x < 10){x := x + 1;}\",", "import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class", "a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x := 10\", ast.Declare) def test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list,", "ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_) class Test_parse_fragment_unit_tests(ParserTestCase):", "import unittest import sys import os sys.path.append(os.path.join(os.path.dirname(__file__), \"..\")) from src import parser from", "test_braced_stmt_list_0(self): self.assertParsesTo(parser.braced_stmt_list, \"{}\", list) def test_function_0(self): self.assertParsesTo(parser.function, \"function a(){}\", ast.Function) def test_function_1(self): self.assertParsesTo(parser.function,", "src import ast class ParserTestCase(unittest.TestCase): def assertParsesTo(self, func, data, type_): self.assertIsInstance(parser.parse(data, func), type_)", "self.assertParsesTo(parser.func_call, \"call(1,2,3)\", ast.FuncCall) def test_expression_0(self): 
self.assertParsesTo(parser.expression, \"1 + 2\", ast.Binop) def test_expression_1(self): self.assertParsesTo(parser.expression,", "ast.While) def test_if_0(self): self.assertParsesTo(parser.if_, \"if(a){return a;}\", ast.If) def test_decl_0(self): self.assertParsesTo(parser.decl, \"decl(int) x :=", "int {}\", ast.Function) def test_function_2(self): self.assertParsesTo(parser.function, \"function a(int x) -> int {}\", ast.Function)", "ast.Function) def test_func_call_0(self): self.assertParsesTo(parser.func_call, \"call()\", ast.FuncCall) def test_func_call_1(self): self.assertParsesTo(parser.func_call, \"call(other())\", ast.FuncCall) def test_func_call_2(self):" ]
[ "''' import pysam ''' Read a FASTA file as a dict if a", "name is given. If not, return an empty dict. ''' def read_fasta(ref_fn: str)", "2021 ''' import pysam ''' Read a FASTA file as a dict if", "{} if ref_fn != '': f = pysam.FastaFile(ref_fn) for r in f.references: ref[r]", "read_fasta(ref_fn: str) -> dict: ref = {} if ref_fn != '': f =", "'': f = pysam.FastaFile(ref_fn) for r in f.references: ref[r] = f[r].upper() return ref", "''' Utils for levioSAM <NAME> Johns Hopkins University 2021 ''' import pysam '''", "as a dict if a file name is given. If not, return an", "= {} if ref_fn != '': f = pysam.FastaFile(ref_fn) for r in f.references:", "dict: ref = {} if ref_fn != '': f = pysam.FastaFile(ref_fn) for r", "file as a dict if a file name is given. If not, return", "dict if a file name is given. If not, return an empty dict.", "''' Read a FASTA file as a dict if a file name is", "given. If not, return an empty dict. ''' def read_fasta(ref_fn: str) -> dict:", "<filename>scripts/leviosam_utils.py ''' Utils for levioSAM <NAME> Johns Hopkins University 2021 ''' import pysam", "If not, return an empty dict. ''' def read_fasta(ref_fn: str) -> dict: ref", "Johns Hopkins University 2021 ''' import pysam ''' Read a FASTA file as", "FASTA file as a dict if a file name is given. If not,", "if ref_fn != '': f = pysam.FastaFile(ref_fn) for r in f.references: ref[r] =", "!= '': f = pysam.FastaFile(ref_fn) for r in f.references: ref[r] = f[r].upper() return", "not, return an empty dict. ''' def read_fasta(ref_fn: str) -> dict: ref =", "Read a FASTA file as a dict if a file name is given.", "for levioSAM <NAME> Johns Hopkins University 2021 ''' import pysam ''' Read a", "pysam ''' Read a FASTA file as a dict if a file name", "a file name is given. If not, return an empty dict. ''' def", "''' def read_fasta(ref_fn: str) -> dict: ref = {} if ref_fn != '':", "a dict if a file name is given. 
If not, return an empty", "import pysam ''' Read a FASTA file as a dict if a file", "str) -> dict: ref = {} if ref_fn != '': f = pysam.FastaFile(ref_fn)", "is given. If not, return an empty dict. ''' def read_fasta(ref_fn: str) ->", "dict. ''' def read_fasta(ref_fn: str) -> dict: ref = {} if ref_fn !=", "University 2021 ''' import pysam ''' Read a FASTA file as a dict", "levioSAM <NAME> Johns Hopkins University 2021 ''' import pysam ''' Read a FASTA", "ref = {} if ref_fn != '': f = pysam.FastaFile(ref_fn) for r in", "ref_fn != '': f = pysam.FastaFile(ref_fn) for r in f.references: ref[r] = f[r].upper()", "-> dict: ref = {} if ref_fn != '': f = pysam.FastaFile(ref_fn) for", "return an empty dict. ''' def read_fasta(ref_fn: str) -> dict: ref = {}", "Hopkins University 2021 ''' import pysam ''' Read a FASTA file as a", "empty dict. ''' def read_fasta(ref_fn: str) -> dict: ref = {} if ref_fn", "a FASTA file as a dict if a file name is given. If", "an empty dict. ''' def read_fasta(ref_fn: str) -> dict: ref = {} if", "Utils for levioSAM <NAME> Johns Hopkins University 2021 ''' import pysam ''' Read", "file name is given. If not, return an empty dict. ''' def read_fasta(ref_fn:", "def read_fasta(ref_fn: str) -> dict: ref = {} if ref_fn != '': f", "if a file name is given. If not, return an empty dict. '''", "<NAME> Johns Hopkins University 2021 ''' import pysam ''' Read a FASTA file" ]
[ "= path_multi_label + '/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid", "path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' #", "!/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo # @function :file of path", "path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir =", "超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址 path_fineture = path_model_dir + \"/embedding_trainable.h5\"", "= path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding +", "# @time :2019/6/5 21:04 # @author :Mo # @function :file of path import", "'/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset =", "# @function :file of path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))", "'/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char =", "path_model_dir = path_out + \"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5' #", "path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv'", "\"/out/\" # path of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding", "+ 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels", "= path_multi_label + '/train.csv' 
path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label +", "path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error", "'/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset", "= path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label = path_out +", "+ \"/out/\" # path of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict =", "path_root + \"/out/\" # path of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict", "# 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train out", "+ '/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char", "os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train out path_out = path_root +", "= path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label +", "+ '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin", "path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out +", "+ '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l =", "embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char =", 
"path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding", "= path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset'", "path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label", "os.pardir)) path_root = path_root.replace('\\\\', '/') # train out path_out = path_root + \"/out/\"", "path_dataset = path_root +'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset +", "= path_root + \"/out/\" # path of embedding path_embedding = path_out + 'data/embeddings'", "+ '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin", "path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label", "path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv'", "= path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding +", "@time :2019/6/5 21:04 # @author :Mo # @function :file of path import os", "= path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label +", "path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\"", "模型目录 path_model_dir = path_out + \"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5'", "path_multi_label + 
'/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid =", "语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json'", "+ '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset", "= path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label =", "path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label = path_multi_label_labels", "# 模型目录 path_model_dir = path_out + \"data/model\" # 语料地址 path_model = path_model_dir +", "path_root.replace('\\\\', '/') # train out path_out = path_root + \"/out/\" # path of", "'/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train =", "@function :file of path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root", "# -*- coding: UTF-8 -*- # !/usr/bin/python # @time :2019/6/5 21:04 # @author", ":2019/6/5 21:04 # @author :Mo # @function :file of path import os #", "= path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding +", "+ '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word", "= path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\" # 语料地址 path_model =", "+ '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error", "+ '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = 
path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category =", "'/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin =", "+ '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests", "+ '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label", "= path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi labels", "coding: UTF-8 -*- # !/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo #", "os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train", "'/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error =", "labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid", "2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid =", "= path_root +'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json'", "import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') #", "path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv'", "path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\" # 语料地址 path_model = path_model_dir", "path_label = path_multi_label_labels path_train = path_multi_label_train path_valid = 
path_multi_label_valid path_tests = path_multi_label_tests path_edata", "-*- coding: UTF-8 -*- # !/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo", "path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset = path_root", "-*- # !/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo # @function :file", "= path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata =", "= path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out", "path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec'", "项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train out path_out", "path_out + \"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters", "+ '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' #", "path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding", "path_category = path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi", "multi labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv'", "+ \"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters =", "path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset = 
path_root +'/dataset' path_category = path_dataset +", "of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char", "path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\" # 语料地址 path_model", "path_out = path_root + \"/out/\" # path of embedding path_embedding = path_out +", "path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label", "'/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label =", "path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding", "path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error #", "path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/')", "path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label", "'/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv' path_multi_label_tests =", "'/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址 path_fineture = path_model_dir", "path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category", "= path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding +", "path_root = 
path_root.replace('\\\\', '/') # train out path_out = path_root + \"/out/\" #", "# !/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo # @function :file of", "= path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category = path_dataset + '/category2labels.json'", "path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding", "'/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train =", "path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt'", "+ '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train", "'/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin =", "path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt'", "path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\" # 语料地址", "'/') # train out path_out = path_root + \"/out/\" # path of embedding", "= os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train out path_out = path_root", "= path_root.replace('\\\\', '/') # train out path_out = path_root + \"/out/\" # path", "path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021", "+ '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train", "+ 'data/embeddings' path_embedding_user_dict = path_embedding + 
'/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word", "path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec'", "+'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy", "path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train", "= path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址", "21:04 # @author :Mo # @function :file of path import os # 项目的根目录", "train out path_out = path_root + \"/out/\" # path of embedding path_embedding =", "# 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址 path_fineture = path_model_dir +", "path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label + '/error.csv' # 路径抽象层 path_label", "path_l2i_i2l = path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label = path_out", "path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category = path_dataset + '/category2labels.json' path_l2i_i2l", "path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址 path_fineture", "# path of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding +", "+ '/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid", "\"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir", "'/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = 
path_multi_label + '/error.csv' # 路径抽象层", "path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin'", "path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding", "path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels = path_multi_label + '/labels.csv'", ":Mo # @function :file of path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__),", "# 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests =", "path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\', '/') # train out path_out =", "classfiy multi labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label +", "= path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录", ":file of path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root =", "path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir", "= path_multi_label_tests path_edata = path_multi_label_error # 模型目录 path_model_dir = path_out + \"data/model\" #", "路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests = path_multi_label_tests", "+ '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir + '/hyper_parameters.json' # embedding微调保存地址 path_fineture =", "path_embedding_user_dict = path_embedding + 
'/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding", "'/term_char.txt' path_embedding_random_word = path_embedding + '/term_word.txt' path_embedding_vector_word2vec_char = path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word =", "'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word =", "# train out path_out = path_root + \"/out/\" # path of embedding path_embedding", "= path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding +", "'data/multi_label' path_multi_label_train = path_multi_label + '/train.csv' path_multi_label_valid = path_multi_label + '/valid.csv' path_multi_label_labels =", "out path_out = path_root + \"/out/\" # path of embedding path_embedding = path_out", "= path_out + \"data/model\" # 语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址", "# @author :Mo # @function :file of path import os # 项目的根目录 path_root", "path_embedding + '/multi_label_char.vec' path_embedding_vector_word2vec_word = path_embedding + '/multi_label_word.vec' path_embedding_vector_word2vec_char_bin = path_embedding + '/multi_label_char.bin'", "# classfiy multi labels 2021 path_multi_label = path_out + 'data/multi_label' path_multi_label_train = path_multi_label", "of path import os # 项目的根目录 path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) path_root = path_root.replace('\\\\',", "= path_embedding + '/user_dict.txt' path_embedding_random_char = path_embedding + '/term_char.txt' path_embedding_random_word = path_embedding +", "path_dataset + '/l2i_i2l.json' # classfiy multi labels 2021 path_multi_label = path_out + 'data/multi_label'", "path_root +'/dataset' path_category = path_dataset + 
'/category2labels.json' path_l2i_i2l = path_dataset + '/l2i_i2l.json' #", "'/error.csv' # 路径抽象层 path_label = path_multi_label_labels path_train = path_multi_label_train path_valid = path_multi_label_valid path_tests", "# 语料地址 path_model = path_model_dir + '/model_fast_text.h5' # 超参数保存地址 path_hyper_parameters = path_model_dir +", "path of embedding path_embedding = path_out + 'data/embeddings' path_embedding_user_dict = path_embedding + '/user_dict.txt'", "@author :Mo # @function :file of path import os # 项目的根目录 path_root =", "= path_multi_label + '/labels.csv' path_multi_label_tests = path_multi_label + '/tests.csv' path_multi_label_error = path_multi_label +", "UTF-8 -*- # !/usr/bin/python # @time :2019/6/5 21:04 # @author :Mo # @function", "'/multi_label_char.bin' path_embedding_vector_word2vec_word_bin = path_embedding + '/multi_label_word.bin' path_dataset = path_root +'/dataset' path_category = path_dataset" ]
[ "0 T = int(input()) for i in range(0,T): inp = input() if(inp ==", "res = 0 T = int(input()) for i in range(0,T): inp = input()", "= 0 T = int(input()) for i in range(0,T): inp = input() if(inp", "= input() if(inp == 'report'): print(res) else: inp_arr = inp.split(' ') res +=", "= int(input()) for i in range(0,T): inp = input() if(inp == 'report'): print(res)", "int(input()) for i in range(0,T): inp = input() if(inp == 'report'): print(res) else:", "for i in range(0,T): inp = input() if(inp == 'report'): print(res) else: inp_arr", "in range(0,T): inp = input() if(inp == 'report'): print(res) else: inp_arr = inp.split('", "inp = input() if(inp == 'report'): print(res) else: inp_arr = inp.split(' ') res", "range(0,T): inp = input() if(inp == 'report'): print(res) else: inp_arr = inp.split(' ')", "T = int(input()) for i in range(0,T): inp = input() if(inp == 'report'):", "i in range(0,T): inp = input() if(inp == 'report'): print(res) else: inp_arr =", "input() if(inp == 'report'): print(res) else: inp_arr = inp.split(' ') res += int(inp_arr[1])" ]
[ "retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key':", "client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT =", "= get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\",", "client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION =", "ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\")", "env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential", "_credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri", "client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri)", "STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type':", "COG_SEARCH_API_HEADERS = 
{'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30' } BLOB_CONTAINER", "\"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential)", "env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri,", "return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value", "COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\"", "azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id =", "env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id,", "retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR", "env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\")", "return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = 
env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION", "\"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = {", "def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def", "\"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\")", "COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY}", "= env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri):", "__tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name", "as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\",", "ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\"", "= \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30'", "get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = 
env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\")", "= env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS", "client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client =", "os import environ as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient", "env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\",", "\"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret", "SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY", "credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY =", "= retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS", "\"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential =", "tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def 
retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client", "kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR =", "= retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json',", "= ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri =", "= env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return", "{'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30' } BLOB_CONTAINER = \"collateral\"", "environ as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id =", "= f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\")", "import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\",", "\"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30' }", "= {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version': '2020-06-30' } BLOB_CONTAINER =", "from azure.keyvault.secrets import SecretClient __tenant_id = 
env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret", "import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\",", "__client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def", "<gh_stars>0 from os import environ as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets", "from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id", "= env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name =", "def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\")", "f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return client.get_secret(key).value COG_SEARCH_KEY = retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT", "__client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential(", "= env.get(\"AZURE_CLIENT_SECRET\", \"\") __key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id,", "get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret ) return SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key):", ") return 
SecretClient(vault_url=kv_uri, credential=_credential) def retreive_secret(key): kv_uri = f\"https://{__key_vault_name}.vault.azure.net\" client = get_kv_client(kv_uri) return", "from os import environ as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import", "retreive_secret(\"cog-search-admin-key\") STORAGE_CONN_STR = retreive_secret(\"storage-conn-string\") COG_SEARCH_END_POINT = env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS =", "azure.keyvault.secrets import SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret =", "env.get(\"AZURE_COG_SEARCH_END_POINT\", \"\") COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS =", "SecretClient __tenant_id = env.get(\"AZURE_TENANT_ID\", \"\") __client_id = env.get(\"AZURE_CLIENT_ID\", \"\") __client_secret = env.get(\"AZURE_CLIENT_SECRET\", \"\")", "COG_SEARCH_API_VERSION = \"?api-version=2020-06-30\" COG_SEARCH_API_HEADERS = {'Content-Type': 'application/json', 'api-key': COG_SEARCH_KEY} COG_SEARCH_API_PARAMS = { 'api-version':", "__key_vault_name = env.get(\"AZURE_KEYVAULT_NAME\", \"\") def get_kv_client(kv_uri): _credential = ClientSecretCredential( tenant_id=__tenant_id, client_id=__client_id, client_secret=__client_secret )", "import environ as env from azure.identity import ClientSecretCredential from azure.keyvault.secrets import SecretClient __tenant_id" ]
[ "django.urls import reverse from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>')", "= client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur", "args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>')", "'Send email' }) assert response.status_code == 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject", "assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db,", "= client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' })", "'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No email", "assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response", "response = client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat", "amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS':", "'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' })", "[] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin',", "def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = 
client.get(url) assert", "from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add')", "reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send':", "response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email =", "'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model':", "def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, {", "client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add')", "mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc", "flag was set before.' 
in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client):", "client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send',", "'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1,", "test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code", "0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '',", "0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0,", "0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url,", "'<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No email can", "assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert", "assert response.status_code == 302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>')", "== 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email ==", "password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message',", "'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 
'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers", "= reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>',", "'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000,", "reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back':", "'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS':", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat',", "1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0,", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200", "assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>')", "before.' 
in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url", "reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin',", "args=[1]) response = client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>')", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message':", "been send, since the email flag was set before.' in response.content assert len(mail.outbox)", "= reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client):", "200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save()", "= Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url,", "from django.core import mail from django.urls import reverse from isimip_data.caveats.models import Caveat, Comment", "'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS':", "flag was set before.' 
in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client):", "'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '',", "mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject',", "'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>']", "'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302", "test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject':", "<filename>isimip_data/caveats/tests/test_admin.py<gh_stars>1-10 from django.conf import settings from django.core import mail from django.urls import reverse", "0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat', 'description':", "'subject': 'Subject', 'message': 'Message', 'recipients': 
'<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code ==", "'_send': 'Send email' }) assert response.status_code == 302 assert len(mail.outbox) == 2 assert", "import mail from django.urls import reverse from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db,", "response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' })", "== 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response =", "== [] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url =", "'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 302 assert len(mail.outbox) ==", "'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id':", "email can been send, since the email flag was set before.' in response.content", "'_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox) == 0 def test_comment_send_get(db,", "= False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject',", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message':", "dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model':", "response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response", "email flag was set before.' 
in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db,", "'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator':", "response = client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url", "'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3']", "assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1])", "'', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000,", "client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change',", "= client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url =", "in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url =", "ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new',", "was set before.' 
in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin',", "caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject':", "password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response", "= reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client):", "assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert", "client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code", "def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, {", "response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email'", "{ 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code", "200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url,", "mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url", "def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, {", "def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = 
reverse('admin:caveats_caveat_add') response = client.post(url, { 'title':", "}) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def", "'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response", "client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code", "['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == []", "assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>')", "def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert", "{ 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator':", "reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin',", "'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '',", "Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, {", "sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS':", "args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back'", "before.' 
in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url", "b'No email can been send, since the email flag was set before.' in", "'_send': 'Send email' }) assert response.status_code == 200 assert b'No email can been", "test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New", "= client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat =", "assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1])", "test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code", "email' }) assert response.status_code == 200 assert b'No email can been send, since", "'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert", "{ 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code", "'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox)", "from django.conf import settings from django.core import mail from django.urls import reverse from", "response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db, client):", "can been send, since the email flag was set before.' 
in response.content assert", "password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code == 200 def", "import reverse from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url", "}) assert response.status_code == 302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin',", "== 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin',", "302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin',", "'', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers ==", "'Back' }) assert response.status_code == 302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client):", "the email flag was set before.' 
in response.content assert len(mail.outbox) == 0 def", "elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0,", "'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 302", "== 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response =", "302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message'", "args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>')", "settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == []", "Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, {", "since the email flag was set before.' 
in response.content assert len(mail.outbox) == 0", "'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS':", "'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response", "== { 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change',", "== 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to ==", "'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert", "client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send',", "response = client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit amet,", "url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_annotation_change_post(db,", "consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0,", "'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS':", "response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response", "= False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) 
response = client.post(url, { 'subject': 'Subject',", "'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS':", "'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code", "== 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body ==", "client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send',", "django.core import mail from django.urls import reverse from isimip_data.caveats.models import Caveat, Comment def", "Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url", "def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save() url", "== [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url =", "'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id':", "password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200 def test_annotation_add_post(db,", "['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response =", "url = 
reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum", "mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response", "'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No", "'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No email", "== [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response =", "= Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url,", "{ 'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr',", "test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url =", "mail from django.urls import reverse from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client):", "email flag was set before.' 
in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db,", "200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url, {", "'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>']", "'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat':", "password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code == 200 def", "0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url,", "url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_get(db,", "302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>')", "client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert", "client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert", "False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message':", "'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id':", "assert 
len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1])", "mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to", "'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302", "'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 302 assert len(mail.outbox)", "import settings from django.core import mail from django.urls import reverse from isimip_data.caveats.models import", "amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS':", "'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status':", "== ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments ==", "302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send',", "args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send", "response = client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url", "= reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>',", "assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email", "'<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert 
len(mail.outbox) == 0 def", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code ==", "assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat',", "url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients':", "mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url", "1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0,", "'', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '',", "set before.' 
in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>')", "Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url =", "{ 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1])", "== 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False", "Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity':", "test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save() url =", "= reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>',", "'<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 302 assert len(mail.outbox) == 2", "== 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url,", "'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No", "'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '',", "'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox)", "settings from django.core import mail from django.urls import reverse from isimip_data.caveats.models import Caveat,", "'' }) assert 
response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] }", "assert b'No email can been send, since the email flag was set before.'", "client.get(url) assert response.status_code == 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1)", "'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat':", "url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client):", "len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response", "def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code", "response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send',", "password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def", "response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur", "reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin',", "= client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert", "} def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url 
= reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url)", "password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat', 'description': 'Lorem", "args=[1]) response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet,", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code == 200", "url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_caveat_send_post(db,", "password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message',", "[] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url,", "'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat':", "assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert", "} def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url)", "response.status_code == 302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url", "= reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor", "assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert", "'Caveat_downloads-TOTAL_FORMS': 
0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download':", "args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back'", "len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response", "was set before.' in response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): client.login(username='admin',", "comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject':", "len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject',", "response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email'", "def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert", "0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0,", "response.status_code == 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body", "test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = 
client.get(url) assert response.status_code", "= client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url =", "password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response", "reverse from isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url =", "'', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == { 'model':", "assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email", "assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1])", "== 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response =", "set before.' 
in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>')", "'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200", "== ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc ==", "response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' })", "def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, {", "reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>')", "import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response =", "mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db,", "args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send", "assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>')", "Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url)", "client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1])", "response.content assert len(mail.outbox) == 0 def test_caveat_send_post_back(db, client): 
client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send',", "assert response.status_code == 200 assert b'No email can been send, since the email", "'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(title='New", "= client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' })", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat', 'description':", "== 302 assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url =", "'' }) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model']", "assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert", "test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject':", "'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert", "2 assert mail.outbox[0].subject == 'Subject' assert mail.outbox[0].body == 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL", "}) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] }", "reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin',", "from django.urls import reverse from isimip_data.caveats.models import Caveat, Comment def 
test_annotation_add_get(db, client): client.login(username='admin',", "'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox) ==", "'', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers", "sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS':", "response = client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url", "url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum", "mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc", "= client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert", "'Send email' }) assert response.status_code == 200 assert b'No email can been send,", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code == 200", "mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response", "= reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum dolor", "client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing", "= client.get(url) assert response.status_code == 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url =", 
"password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem", "[] assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send',", "200 assert b'No email can been send, since the email flag was set", "'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code == 302", "'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '',", "def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, {", "client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1)", "client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1])", "== 200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response =", "= reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client):", "= reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin',", "200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save()", "sit 
amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3',", "200 def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url)", "== 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response =", "'severity': 'low', 'status': 'new', 'specifiers_model': 'model', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS':", "response.status_code == 200 assert b'No email can been send, since the email flag", "'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000,", "'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity':", "response.status_code == 200 def test_annotation_add_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response =", "in response.content assert len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url =", "url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code == 200 def test_comment_send_post(db,", "send, since the email flag was set before.' 
in response.content assert len(mail.outbox) ==", "reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit", "assert response.status_code == 200 def test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1])", "isimip_data.caveats.models import Caveat, Comment def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response", "'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200", "{ 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code ==", "assert Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url", "'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 302 assert", "client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code == 200 def", "consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0,", "reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back':", "'<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code == 200 assert b'No email can", "test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert response.status_code ==", "'', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 
'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '',", "comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message',", "['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == []", "reverse('admin:caveats_caveat_add') response = client.post(url, { 'title': 'New Caveat', 'description': 'Lorem ipsum dolor sit", "= client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment =", "== 'Message' assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to ==", "mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments", "{ 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code ==", "mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db,", "'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS':", "'New Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1,", "== 200 assert b'No email can been send, since the email flag was", "{ 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1])", "url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 
'recipients':", "assert response.status_code == 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject' assert", "'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS':", "test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code", "assert len(mail.outbox) == 0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1])", "test_caveat_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject':", "assert mail.outbox[0].cc == [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def", "'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low',", "'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0, 'Caveat_figures-MIN_NUM_FORMS': 0, 'Caveat_figures-MAX_NUM_FORMS': 1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure':", "email' }) assert response.status_code == 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject ==", "'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send': 'Send email' }) assert response.status_code ==", "def test_caveat_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert", "def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url", "== [] assert mail.outbox[0].bcc == [] 
assert mail.outbox[0].attachments == [] def test_caveat_send_post_error(db, client):", "assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert", "== [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response =", "reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_send':", "[] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send',", "response = client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment", "response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>') comment = Comment.objects.get(pk=1) comment.email =", "Caveat.objects.get(pk=1).specifiers == { 'model': ['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url =", "== settings.DEFAULT_FROM_EMAIL assert mail.outbox[0].to == ['<EMAIL>'] assert mail.outbox[1].to == ['<EMAIL>'] assert mail.outbox[0].cc ==", "0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code ==", "1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert", "1000, 'Caveat_figures-__prefix__-id': '', 'Caveat_figures-__prefix__-caveat': '', 'Caveat_figures-__prefix__-figure': '', 'Caveat_downloads-TOTAL_FORMS': 0, 'Caveat_downloads-INITIAL_FORMS': 0, 
'Caveat_downloads-MIN_NUM_FORMS': 0,", "test_annotation_change_post(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response = client.post(url, { 'title':", "'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox) == 0", "args=[1]) response = client.get(url) assert response.status_code == 200 def test_comment_send_post(db, client): client.login(username='admin', password='<PASSWORD>')", "== 200 def test_caveat_send_post(db, client): client.login(username='admin', password='<PASSWORD>') caveat = Caveat.objects.get(pk=1) caveat.email = False", "'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers ==", "[] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin',", "'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox) == 0", "'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(pk=1).specifiers == {", "'Message', 'recipients': '<EMAIL>\\<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert len(mail.outbox) ==", "0 def test_comment_send_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url)", "reverse('admin:caveats_comment_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_send':", "assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1])", "Comment 
def test_annotation_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_add') response = client.get(url) assert", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.get(url) assert response.status_code ==", "0, 'Caveat_downloads-INITIAL_FORMS': 0, 'Caveat_downloads-MIN_NUM_FORMS': 0, 'Caveat_downloads-MAX_NUM_FORMS': 1000, 'Caveat_downloads-__prefix__-id': '', 'Caveat_downloads-__prefix__-caveat': '', 'Caveat_downloads-__prefix__-download': ''", "response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db,", "== [] assert mail.outbox[0].bcc == [] assert mail.outbox[0].attachments == [] def test_comment_send_post_error(db, client):", "caveat = Caveat.objects.get(pk=1) caveat.email = False caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response =", "len(mail.outbox) == 0 def test_comment_send_post_back(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response", "== { 'model': ['model'] } def test_annotation_change_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change',", "caveat.save() url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message': 'Message',", "[] def test_comment_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_comment_send', args=[1]) response = client.post(url,", "}) assert response.status_code == 302 assert len(mail.outbox) == 2 assert mail.outbox[0].subject == 'Subject'", "== 302 assert Caveat.objects.get(title='New Caveat').specifiers == { 'model': ['model'] } def test_annotation_change_get(db, client):", "False caveat.save() url = 
reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject': 'Subject', 'message':", "= reverse('admin:caveats_caveat_change', args=[1]) response = client.get(url) assert response.status_code == 200 def test_annotation_change_post(db, client):", "'Subject', 'message': 'Message', 'recipients': '<EMAIL>\\n<EMAIL>', '_back': 'Back' }) assert response.status_code == 302 assert", "elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model3', 'Caveat_figures-TOTAL_FORMS': 0, 'Caveat_figures-INITIAL_FORMS': 0,", "}) assert response.status_code == 200 assert b'No email can been send, since the", "= client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing", "'', 'Caveat_downloads-__prefix__-download': '' }) assert response.status_code == 302 assert Caveat.objects.get(title='New Caveat').specifiers == {", "client.post(url, { 'title': 'Caveat', 'description': 'Lorem ipsum dolor sit amet, consetetur sadipscing elitr',", "test_caveat_send_post_error(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.post(url, { 'subject':", "sit amet, consetetur sadipscing elitr', 'creator': 1, 'severity': 'low', 'status': 'new', 'specifiers_model': 'model',", "django.conf import settings from django.core import mail from django.urls import reverse from isimip_data.caveats.models", "client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_send', args=[1]) response = client.get(url) assert response.status_code ==", "['model3'] } def test_caveat_add_get(db, client): client.login(username='admin', password='<PASSWORD>') url = reverse('admin:caveats_caveat_change', args=[1]) response =", "comment = Comment.objects.get(pk=1) comment.email = False comment.save() url = reverse('admin:caveats_comment_send', args=[1]) response =" ]
[ "self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user = User.objects.create_user(username=cls.USER, password=cls.PASSWORD,", "rest_framework.authtoken.models import Token from rest_framework.test import APITestCase from users.models import User, GitHubProfile class", "BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase,", "rest_framework.test import APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin'", "EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod", "APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD =", "def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user", "setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user =", "Token from rest_framework.test import APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER", "USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp()", "'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token =", "from users.models import User, GitHubProfile class BaseTestCase(APITestCase): 
USER = 'admin' PASSWORD = '<PASSWORD>'", "GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def", "from rest_framework.authtoken.models import Token from rest_framework.test import APITestCase from users.models import User, GitHubProfile", "'<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}')", "from rest_framework.test import APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER =", "import Token from rest_framework.test import APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase):", "User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>'", "= 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token", "import APITestCase from users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD", "import User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL =", "user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user = User.objects.create_user(username=cls.USER, password=cls.PASSWORD, email=cls.EMAIL)", "= '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token", "users.models import User, GitHubProfile class BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL", "= Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token 
{user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user = User.objects.create_user(username=cls.USER, password=cls.PASSWORD, email=cls.EMAIL) GitHubProfile.objects.create(user=cls.user)", "'<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls):", "class BaseTestCase(APITestCase): USER = 'admin' PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self):", "PASSWORD = '<PASSWORD>' EMAIL = '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user)", "= '<EMAIL>' def setUp(self): super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def", "super(BaseTestCase, self).setUp() user_authentication_token = Token.objects.create(user=self.user) self.client.credentials(HTTP_AUTHORIZATION=f'Token {user_authentication_token.key}') @classmethod def setUpTestData(cls): cls.user = User.objects.create_user(username=cls.USER," ]
[ "expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename", "inputs and schema, for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema,", "JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate", "translations=translations) # Validate the indicators. validation_successful = opensdg_output.validate() # If everything was valid,", "so we need to # add a \"data alteration\" function to correct it.", "in '_site/es', '_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the build will", "# The data in this example is not exactly what sdg-build expects, so", "we need to convert it # to the \"tidy\" format expected by sdg-build.", "sdg import pandas as pd # Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search'", "the Open SDG reporting platform. \"\"\" import os import sdg import pandas as", "converting it into the JSON output suitable for the Open SDG reporting platform.", "for the Open SDG reporting platform. \"\"\" import os import sdg import pandas", "resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map", "some columns to match what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'})", "it. def data_alteration(df): # The data in this example is in a \"wide\"", "you can generate the build: # 1. Translated into a single language, like", "1. Translated into a single language, like English: opensdg_output.execute('en') # (the build will", "builds will appear in '_site/es', '_site/ru', and '_site/en') # 3. 
Untranslated: opensdg_output.execute() #", "is not exactly what sdg-build expects, so we need to # add a", "# (the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There", "= sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators. validation_successful = opensdg_output.validate()", "df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename some columns to", "format, so we need to convert it # to the \"tidy\" format expected", "\"tidy\" format expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We", "appear in '_site/es', '_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the build", "# Use SDG Translations for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) #", "inputs into one list. inputs = [data_input] # Use a Prose.io file for", "'_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag = '0.8.1'", "language, like English: opensdg_output.execute('en') # (the build will appear in '_site/en') # 2.", "[data_input] # Use a Prose.io file for the metadata schema. schema_path = os.path.join('tests',", "instance and converting it into the JSON output suitable for the Open SDG", "CKAN instance and converting it into the JSON output suitable for the Open", "these inputs and schema, for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs,", "to the \"tidy\" format expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value')", "in a \"wide\" format, so we need to convert it # to the", "expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the", "# Combine the inputs into one list. 
inputs = [data_input] # Use a", "indicators. validation_successful = opensdg_output.validate() # If everything was valid, perform the build. if", "suitable for the Open SDG reporting platform. \"\"\" import os import sdg import", "import os import sdg import pandas as pd # Input data from CKAN", "opensdg_output.validate() # If everything was valid, perform the build. if validation_successful: # Here", "this example is in a \"wide\" format, so we need to convert it", "everything was valid, perform the build. if validation_successful: # Here are several ways", "= opensdg_output.validate() # If everything was valid, perform the build. if validation_successful: #", "# The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan(", "this example is not exactly what sdg-build expects, so we need to #", "the build: # 1. Translated into a single language, like English: opensdg_output.execute('en') #", "(the build will appear in '_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es',", "sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename some columns", "return df data_input.add_data_alteration(data_alteration) # Combine the inputs into one list. inputs = [data_input]", "in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were validation errors. See output", "'_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were validation errors. See output above.')", "data_alteration(df): # The data in this example is in a \"wide\" format, so", "also rename some columns to match what sdg-build expects. df = df.rename(columns={'year': 'Year',", "We also rename some columns to match what sdg-build expects. df = df.rename(columns={'year':", "perform the build. 
if validation_successful: # Here are several ways you can generate", "indicator_id_map = { # The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' }", "# to the \"tidy\" format expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender',", "# 1. Translated into a single language, like English: opensdg_output.execute('en') # (the build", "opensdg_output.execute() # (the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise", "a \"data alteration\" function to correct it. def data_alteration(df): # The data in", "The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint,", "need to convert it # to the \"tidy\" format expected by sdg-build. df", "the JSON output suitable for the Open SDG reporting platform. \"\"\" import os", "tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs", "\"wide\" format, so we need to convert it # to the \"tidy\" format", "for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) #", "'_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds", "data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID", "\"\"\" This is an example of importing data from a CKAN instance and", "translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these", "file for the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) #", "output_folder='_site', translations=translations) # Validate the indicators. 
validation_successful = opensdg_output.validate() # If everything was", "an example of importing data from a CKAN instance and converting it into", "sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine", "match what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration)", "# Validate the indicators. validation_successful = opensdg_output.validate() # If everything was valid, perform", "what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) #", "can generate the build: # 1. Translated into a single language, like English:", "list. inputs = [data_input] # Use a Prose.io file for the metadata schema.", ") # The data in this example is not exactly what sdg-build expects,", "expects, so we need to # add a \"data alteration\" function to correct", "one list. inputs = [data_input] # Use a Prose.io file for the metadata", "= os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag", "'_site/en') # 3. Untranslated: opensdg_output.execute() # (the build will appear in '_site') opensdg_output.execute_per_language(['es',", "# Here are several ways you can generate the build: # 1. Translated", "to correct it. def data_alteration(df): # The data in this example is in", "exactly what sdg-build expects, so we need to # add a \"data alteration\"", "= df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs into", "os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag =", "the build. 
if validation_successful: # Here are several ways you can generate the", "'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs into one list.", "into one list. inputs = [data_input] # Use a Prose.io file for the", "and schema, for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site',", "and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the build will appear in '_site')", "schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag = '0.8.1' translations", "build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were validation", "for the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use", "like English: opensdg_output.execute('en') # (the build will appear in '_site/en') # 2. Translated", "schema, for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations)", "function to correct it. def data_alteration(df): # The data in this example is", "# 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will", "'_site/es', '_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the build will appear", "\"output\" from these inputs and schema, for JSON for Open SDG. opensdg_output =", "Open SDG reporting platform. \"\"\" import os import sdg import pandas as pd", "English: opensdg_output.execute('en') # (the build will appear in '_site/en') # 2. 
Translated into", "Untranslated: opensdg_output.execute() # (the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else:", "into a single language, like English: opensdg_output.execute('en') # (the build will appear in", "will appear in '_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en'])", "example of importing data from a CKAN instance and converting it into the", "languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear in '_site/es', '_site/ru', and", "df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs into one", "# Use a Prose.io file for the metadata schema. schema_path = os.path.join('tests', '_prose.yml')", "to match what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df", "what sdg-build expects, so we need to # add a \"data alteration\" function", "Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear in", "sdg-build expects, so we need to # add a \"data alteration\" function to", "in this example is not exactly what sdg-build expects, so we need to", "from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID for", "add a \"data alteration\" function to correct it. def data_alteration(df): # The data", "# The data in this example is in a \"wide\" format, so we", "# We also rename some columns to match what sdg-build expects. df =", "for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from", "sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this example is not exactly", "convert it # to the \"tidy\" format expected by sdg-build. 
df = pd.melt(df,", "translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs and schema, for", "'0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs and schema,", "was valid, perform the build. if validation_successful: # Here are several ways you", "# 3. Untranslated: opensdg_output.execute() # (the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru',", "data in this example is not exactly what sdg-build expects, so we need", "sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators. validation_successful = opensdg_output.validate() #", "(three builds will appear in '_site/es', '_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute()", "} data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this example", "df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs", "# (the build will appear in '_site/en') # 2. Translated into several languages:", "from a CKAN instance and converting it into the JSON output suitable for", "Use SDG Translations for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create", "indicator_id_map=indicator_id_map ) # The data in this example is not exactly what sdg-build", "a \"wide\" format, so we need to convert it # to the \"tidy\"", "= 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b':", "'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs into one list. 
inputs =", "several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear in '_site/es', '_site/ru',", "3. Untranslated: opensdg_output.execute() # (the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en'])", "metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations", "(the build will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were", "2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear", "df data_input.add_data_alteration(data_alteration) # Combine the inputs into one list. inputs = [data_input] #", "generate the build: # 1. Translated into a single language, like English: opensdg_output.execute('en')", "from these inputs and schema, for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg(", "4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data", "as pd # Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = {", "var_name='gender', value_name='value') # We also rename some columns to match what sdg-build expects.", "value_name='value') # We also rename some columns to match what sdg-build expects. df", "the indicators. validation_successful = opensdg_output.validate() # If everything was valid, perform the build.", "in this example is in a \"wide\" format, so we need to convert", "schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for", "columns to match what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value': 'Value'}) return", "= { # The resource ID for indicator 4.2.2. 
'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input", "\"data alteration\" function to correct it. def data_alteration(df): # The data in this", "inputs = [data_input] # Use a Prose.io file for the metadata schema. schema_path", "inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators. validation_successful = opensdg_output.validate() # If", "pandas as pd # Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map =", "sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag)", "several ways you can generate the build: # 1. Translated into a single", "Translations for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\"", "= '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs and", "it # to the \"tidy\" format expected by sdg-build. df = pd.melt(df, id_vars=['year'],", "if validation_successful: # Here are several ways you can generate the build: #", "'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in", "will appear in '_site/es', '_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the", "endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID for indicator 4.2.2.", "= sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs and schema, for JSON", "JSON output suitable for the Open SDG reporting platform. \"\"\" import os import", "alteration\" function to correct it. def data_alteration(df): # The data in this example", "are several ways you can generate the build: # 1. 
Translated into a", "SDG reporting platform. \"\"\" import os import sdg import pandas as pd #", "ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map )", "appear in '_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) #", "data in this example is in a \"wide\" format, so we need to", "validation_successful: # Here are several ways you can generate the build: # 1.", "If everything was valid, perform the build. if validation_successful: # Here are several", "schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations", "Translated into a single language, like English: opensdg_output.execute('en') # (the build will appear", "a CKAN instance and converting it into the JSON output suitable for the", "# If everything was valid, perform the build. if validation_successful: # Here are", "'ru', 'en']) # (three builds will appear in '_site/es', '_site/ru', and '_site/en') #", "is in a \"wide\" format, so we need to convert it # to", "'4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this", "to # add a \"data alteration\" function to correct it. def data_alteration(df): #", "CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID for indicator", "correct it. def data_alteration(df): # The data in this example is in a", "format expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also", "def data_alteration(df): # The data in this example is in a \"wide\" format,", "# Create an \"output\" from these inputs and schema, for JSON for Open", "ways you can generate the build: # 1. 
Translated into a single language,", "of importing data from a CKAN instance and converting it into the JSON", "The data in this example is not exactly what sdg-build expects, so we", "'en']) # (three builds will appear in '_site/es', '_site/ru', and '_site/en') # 3.", "data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this example is", "so we need to convert it # to the \"tidy\" format expected by", "endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this example is not exactly what", "Validate the indicators. validation_successful = opensdg_output.validate() # If everything was valid, perform the", "a Prose.io file for the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema =", "pd # Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { #", "Create an \"output\" from these inputs and schema, for JSON for Open SDG.", "indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input = sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The", "opensdg_output.execute('en') # (the build will appear in '_site/en') # 2. Translated into several", "is an example of importing data from a CKAN instance and converting it", "valid, perform the build. if validation_successful: # Here are several ways you can", "output suitable for the Open SDG reporting platform. \"\"\" import os import sdg", "example is not exactly what sdg-build expects, so we need to # add", "need to # add a \"data alteration\" function to correct it. def data_alteration(df):", "import sdg import pandas as pd # Input data from CKAN endpoint =", "an \"output\" from these inputs and schema, for JSON for Open SDG. opensdg_output", "by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename some", "into the JSON output suitable for the Open SDG reporting platform. 
\"\"\" import", "= sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG Translations for translations tag = '0.8.1' translations =", "pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename some columns to match what", "reporting platform. \"\"\" import os import sdg import pandas as pd # Input", "we need to # add a \"data alteration\" function to correct it. def", "'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2'", "= pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') # We also rename some columns to match", "= [data_input] # Use a Prose.io file for the metadata schema. schema_path =", "The data in this example is in a \"wide\" format, so we need", "# (three builds will appear in '_site/es', '_site/ru', and '_site/en') # 3. Untranslated:", "rename some columns to match what sdg-build expects. df = df.rename(columns={'year': 'Year', 'value':", "to convert it # to the \"tidy\" format expected by sdg-build. df =", "data_input.add_data_alteration(data_alteration) # Combine the inputs into one list. inputs = [data_input] # Use", "Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The resource", "Use a Prose.io file for the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema", "in '_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three", "build will appear in '_site/en') # 2. Translated into several languages: opensdg_output.execute_per_language(['es', 'ru',", "'_site/ru', and '_site/en') # 3. Untranslated: opensdg_output.execute() # (the build will appear in", "not exactly what sdg-build expects, so we need to # add a \"data", "Combine the inputs into one list. 
inputs = [data_input] # Use a Prose.io", "Here are several ways you can generate the build: # 1. Translated into", "data from a CKAN instance and converting it into the JSON output suitable", "into several languages: opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear in '_site/es',", "it into the JSON output suitable for the Open SDG reporting platform. \"\"\"", "will appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were validation errors.", "the \"tidy\" format expected by sdg-build. df = pd.melt(df, id_vars=['year'], var_name='gender', value_name='value') #", "id_vars=['year'], var_name='gender', value_name='value') # We also rename some columns to match what sdg-build", "sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an \"output\" from these inputs and schema, for JSON for", "build. if validation_successful: # Here are several ways you can generate the build:", "Prose.io file for the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path)", "appear in '_site') opensdg_output.execute_per_language(['es', 'ru', 'en']) else: raise Exception('There were validation errors. See", "\"\"\" import os import sdg import pandas as pd # Input data from", "the metadata schema. schema_path = os.path.join('tests', '_prose.yml') schema = sdg.schemas.SchemaInputOpenSdg(schema_path=schema_path) # Use SDG", "'value': 'Value'}) return df data_input.add_data_alteration(data_alteration) # Combine the inputs into one list. inputs", "# add a \"data alteration\" function to correct it. def data_alteration(df): # The", "the inputs into one list. inputs = [data_input] # Use a Prose.io file", "= sdg.inputs.InputCkan( endpoint=endpoint, indicator_id_map=indicator_id_map ) # The data in this example is not", "schema=schema, output_folder='_site', translations=translations) # Validate the indicators. 
validation_successful = opensdg_output.validate() # If everything", "a single language, like English: opensdg_output.execute('en') # (the build will appear in '_site/en')", "single language, like English: opensdg_output.execute('en') # (the build will appear in '_site/en') #", "and converting it into the JSON output suitable for the Open SDG reporting", "validation_successful = opensdg_output.validate() # If everything was valid, perform the build. if validation_successful:", "# Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map = { # The", "import pandas as pd # Input data from CKAN endpoint = 'https://inventory.data.gov/api/action/datastore_search' indicator_id_map", "for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the", "SDG Translations for translations tag = '0.8.1' translations = sdg.translations.TranslationInputSdgTranslations(tag=tag) # Create an", "build: # 1. Translated into a single language, like English: opensdg_output.execute('en') # (the", "SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators. validation_successful", "This is an example of importing data from a CKAN instance and converting", "{ # The resource ID for indicator 4.2.2. 'f78445b3-e017-43b2-857f-b39d2004546b': '4-2-2' } data_input =", "opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators. validation_successful =", "os import sdg import pandas as pd # Input data from CKAN endpoint", "opensdg_output.execute_per_language(['es', 'ru', 'en']) # (three builds will appear in '_site/es', '_site/ru', and '_site/en')", "importing data from a CKAN instance and converting it into the JSON output", "platform. 
\"\"\" import os import sdg import pandas as pd # Input data", "example is in a \"wide\" format, so we need to convert it #", "Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) # Validate the indicators.", "for JSON for Open SDG. opensdg_output = sdg.outputs.OutputOpenSdg( inputs=inputs, schema=schema, output_folder='_site', translations=translations) #" ]
[ "= ymax YbottomOrigin = ymax - low_resolution lowres_cells = [] exception_cells = []", "#assign veh_avail and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = []", "= grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] =", "= Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs =", "pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon =", "blocks, block_groups): #blocks first, for simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights", "Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not", "side # END INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop", "YtopOrigin = ymax YbottomOrigin = ymax - low_resolution lowres_cells = [] exception_cells =", "= grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx,", "= 1000 #m to a side # END INTRO actual code def summarize_veh_avail(row):", "bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are within which the grid will", "#INTRO - need to edit values here for new city deployment data_source =", "= xmin + low_resolution YtopOrigin = ymax YbottomOrigin = ymax - low_resolution lowres_cells", "None: for exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells", "32712 blocks_gdf_crs = 
gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry", "# required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd import", "crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry =", "build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs =", "new city deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs", "1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid(", "/ total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars", "maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean',", "block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs", "#if no population, assume all 0 households have 2 cars pct_carfree = (int(row['B25044_003E'])", "= YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin,", "= pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs =", 
"block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs)", "points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col] =", "aggregate_by='mean', ) return grid #clip blocks and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs)", "point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs", "= \"census\" #\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs =", "#this is the are within which the grid will be higher-resolution point =", "maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree',", "block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells", "1000 #m to a side # END INTRO actual code def summarize_veh_avail(row): total_pop", "to a side # END INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E'])", "block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index =", "level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', )", "blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = 
maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'],", "= pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs =", "INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1: return", "bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces,", "= gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs =", "2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) +", "/ low_resolution)) XleftOrigin = xmin XrightOrigin = xmin + low_resolution YtopOrigin = ymax", "0,0,1 #if no population, assume all 0 households have 2 cars pct_carfree =", "(XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not", "/ total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 -", "and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx", "groups for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights", "be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs =", "Ytop = YtopOrigin Ybottom = YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin,", "veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ 
shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs", "1: return 0,0,1 #if no population, assume all 0 households have 2 cars", "block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for", "pd import numpy as np import shapely import shapely.geometry from shapely.geometry import Polygon,", "range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell =", "total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union,", "pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you", "- pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None):", "bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar,", "= gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs =", "< 1: return 0,0,1 #if no population, assume all 0 households have 2", "YtopOrigin Ybottom = YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin,", "grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon", "in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell", "not 
cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom", "centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']:", "blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, )", "4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are within which", "np import shapely import shapely.geometry from shapely.geometry import Polygon, Point from tqdm import", "bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define", "(https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin =", "= populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon =", "Polygon, Point from tqdm import tqdm import maup import os #INTRO - need", "return grid #clip blocks and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs =", "grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326)", "grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon']", 
"block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and", "None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop -", "+ highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for simple population blocks_pieces =", "[] for i in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for j", "pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz", "crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are", "= grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id']", "exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin)", "shapely import shapely.geometry from shapely.geometry import Polygon, Point from tqdm import tqdm import", "pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree", "maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for car ownership bg_pieces =", "crs=city_crs) high_res = 250 #m to a side low_res = 1000 #m to", "for simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights", "idx centroid = 
grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col in", "population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights,", "import pandas as pd import numpy as np import shapely import shapely.geometry from", "1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None,", ") #then block groups for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights =", "cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E']))", "for exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def", "grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs,", "END INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1:", "= idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] =", "if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat']", "= idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col", "= [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this", "point = 
Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs", "= point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250 #m to", "\"census\" #\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs)", "shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the", "grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for car ownership", "blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block", "import shapely import shapely.geometry from shapely.geometry import Polygon, Point from tqdm import tqdm", "250 #m to a side low_res = 1000 #m to a side #", "= newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop =", "= [] for i in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for", "+ low_resolution highres_cells = [] if exception_gdf_crs is not None: for exception_cell in", "cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin = xmin + low_resolution", "else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom = Ybottom - low_resolution XleftOrigin", "to a side low_res = 1000 #m to a side # END INTRO", "= (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar", "gpd.options.use_pygeos = False import pandas as pd import numpy as np import shapely", "city deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs = 
32712 blocks_gdf_crs =", "columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return", "= [poly_crs], crs=city_crs) high_res = 250 #m to a side low_res = 1000", "exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups):", "= gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [", "from tqdm import tqdm import maup import os #INTRO - need to edit", "from shapely.geometry import Polygon, Point from tqdm import tqdm import maup import os", "side low_res = 1000 #m to a side # END INTRO actual code", "gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens']", "= centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx, col] points.to_csv('pop_points.csv') grid_pop_gdf_latlon.to_file('grid_pop.geojson',driver='GeoJSON')", "not None: for exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells +", "high_res = 250 #m to a side low_res = 1000 #m to a", "= maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights,", "# END INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop <", "int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if no population, assume all 0", "gpd.GeoDataFrame(geometry = [poly_crs], 
crs=city_crs) high_res = 250 #m to a side low_res =", "build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for", "if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell)", "tqdm import maup import os #INTRO - need to edit values here for", "['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip", "grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] =", "int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin =", "False import pandas as pd import numpy as np import shapely import shapely.geometry", "= [] exception_cells = [] for i in range(cols): Ytop = YtopOrigin Ybottom", "for i in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for j in", "if total_pop < 1: return 0,0,1 #if no population, assume all 0 households", "blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for car ownership bg_pieces = maup.intersections(block_groups,", "+= build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks first,", "XrightOrigin = xmin + low_resolution YtopOrigin = ymax YbottomOrigin = ymax - low_resolution", "= pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs,", "a side # END INTRO actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if", "/ low_resolution)) cols 
= int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin = xmin", "= block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx", "exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop", "= 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon =", "no population, assume all 0 households have 2 cars pct_carfree = (int(row['B25044_003E']) +", "4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs)", "np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y", "exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid,", "in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars", "else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom = Ybottom -", "numpy as np import shapely import shapely.geometry from shapely.geometry import Polygon, Point from", "newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx])", "Ybottom = YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop),", "#clip blocks and block groups 
blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs)", "geopandas as gpd # required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas", "low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells =", "= maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups", "bounds_gdf_crs) #assign veh_avail and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx =", "total_pop < 1: return 0,0,1 #if no population, assume all 0 households have", "= int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin", "bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are within which the", "= (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop", "exception_cells = [] for i in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin", "gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same index", "pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] =", "newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx", "grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not", "point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs 
= gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250", "crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] =", "pct_twopluscars = 1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs,", "rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin", "the are within which the grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon", "(XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if", "in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks,", "grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars']", "lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom = Ybottom", "same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:])", "higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs)", "= 250 #m to a side low_res = 1000 #m to a side", "grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area", "here for new city deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs =", "block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area 
grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame()", "weights=blocks_weights, ) #then block groups for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights", "return 0,0,1 #if no population, assume all 0 households have 2 cars pct_carfree", "for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd import numpy as", "= False import pandas as pd import numpy as np import shapely import", "= XleftOrigin + low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells = [] if", "a side low_res = 1000 #m to a side # END INTRO actual", "def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if no", "(int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar return", "XleftOrigin + low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs", "point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res", "in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop", "block_groups): #blocks first, for simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights =", "pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree -", "import maup import os #INTRO - need to edit values here for new", "build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows", "import Polygon, Point from tqdm 
import tqdm import maup import os #INTRO -", "XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs is not None: for exception_cell", "[poly_crs], crs=city_crs) high_res = 250 #m to a side low_res = 1000 #m", "index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index", "veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars =", "pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250)", "i in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for j in range(rows):", "[] exception_cells = [] for i in range(cols): Ytop = YtopOrigin Ybottom =", "you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution))", "deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs)", "+ low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs is", "= total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells =", "blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry =", "not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx 
centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] =", "points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx, col] points.to_csv('pop_points.csv')", "= int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if no population, assume all", "ymax - low_resolution lowres_cells = [] exception_cells = [] for i in range(cols):", "= maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10']", "simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights =", "pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar']", "as gpd # required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as", "import geopandas as gpd # required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import", "block groups for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum()", "Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs", "low_resolution lowres_cells = [] exception_cells = [] for i in range(cols): Ytop =", "= gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250 #m to a side low_res", "xmin + low_resolution YtopOrigin = ymax YbottomOrigin = ymax - low_resolution 
lowres_cells =", "int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar,", "population, assume all 0 households have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E']))", "https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd import numpy as np import", "groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups", "grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx", "veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop", "import numpy as np import shapely import shapely.geometry from shapely.geometry import Polygon, Point", "low_resolution highres_cells = [] if exception_gdf_crs is not None: for exception_cell in exception_cells:", "grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate(", "'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks", "MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd import numpy as np", "bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] =", "area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] 
= maup.prorate( blocks_pieces,", "data_source = \"census\" #\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs", "= grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in", "250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs,", "lowres_cells = [] exception_cells = [] for i in range(cols): Ytop = YtopOrigin", "block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree']", "exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom = Ybottom - low_resolution", "cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else:", "actual code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1: return 0,0,1", "poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250 #m", "within which the grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point],", "gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)],", "maup.intersections(blocks, grid, area_cutoff=0) 
blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] =", "bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same index block_groups_gdf_crs.index", "def populate_grid(grid, blocks, block_groups): #blocks first, for simple population blocks_pieces = maup.intersections(blocks, grid,", "bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns", "grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points", "maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for", "car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights,", "gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry", "populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326)", "= 1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution,", "bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns],", "grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, 
block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10']", "pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax", "= bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols", "- low_resolution Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin", "total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars']", "for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index: pct_carfree,", "XrightOrigin = XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs is not None:", "#\"census\" or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail", "= YtopOrigin Ybottom = YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin, Ytop),", "for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop']", "total_pop = int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if no population, assume", "is not None: for exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells", "populate_grid(grid, blocks, block_groups): #blocks first, for simple population blocks_pieces = maup.intersections(blocks, 
grid, area_cutoff=0)", "= maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns =", "= Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin = XrightOrigin +", "return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds #", "\"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,]", "- pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax =", "pandas as pd import numpy as np import shapely import shapely.geometry from shapely.geometry", "high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) /", "= summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] =", "#define exception polygon #this is the are within which the grid will be", "is the are within which the grid will be higher-resolution point = Point(-71.411479,41.823544)", "= blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights,", "points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not 
np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx", "tqdm import tqdm import maup import os #INTRO - need to edit values", "low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows =", "[] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in block_groups_gdf_crs.index:", "exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250 #m to a side", "ymax YbottomOrigin = ymax - low_resolution lowres_cells = [] exception_cells = [] for", "are within which the grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon =", "all 0 households have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop", "Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is", "highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for simple population blocks_pieces = maup.intersections(blocks,", "as pd import numpy as np import shapely import shapely.geometry from shapely.geometry import", "block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same index block_groups_gdf_crs.index =", "bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols =", "= int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin = xmin + low_resolution YtopOrigin", "grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']):", "grid[columns] = maup.prorate( bg_pieces, 
block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and", "and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail", "cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else:", "which the grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs", "or \"ghsl\" city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail =", "return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for simple population", "bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns]", "range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for j in range(rows): cell =", "= gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, )", "Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin = XrightOrigin", "= grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs, blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] /", "low_res = 1000 #m to a side # END INTRO actual code def", "os #INTRO - need to edit values here for new city deployment data_source", "- low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells", "Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin = 
XrightOrigin + low_resolution", "grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col]", "= cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell)", "= float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] =", "low_resolution Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution XrightOrigin =", "points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x for", "high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for simple", "Ytop = Ytop - low_resolution Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin", "code def summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if", "maup import os #INTRO - need to edit values here for new city", "if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution", "exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs = populate_grid( grid_gdf_crs,", "import tqdm import maup import os #INTRO - need to edit values here", "as np import shapely import shapely.geometry from shapely.geometry import Polygon, Point from tqdm", "in grid_pop_gdf_latlon.index: if not 
np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid =", "gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx", "gpd # required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd", "blocks_gdf_crs, block_groups_gdf_crs, ) grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points =", "highres_cells += build_grid(exception_cell, high_resolution) return lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks", "#then block groups for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid,", "shapely.geometry import Polygon, Point from tqdm import tqdm import maup import os #INTRO", "YbottomOrigin for j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom),", "lowres_cells + highres_cells def populate_grid(grid, blocks, block_groups): #blocks first, for simple population blocks_pieces", "xmin XrightOrigin = xmin + low_resolution YtopOrigin = ymax YbottomOrigin = ymax -", "Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None:", "Point from tqdm import tqdm import maup import os #INTRO - need to", "for car ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights =", "maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and block groups", "pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ 
shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs)", "(int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars", "will be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs", "= 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are within", "+ low_resolution YtopOrigin = ymax YbottomOrigin = ymax - low_resolution lowres_cells = []", "(XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union):", "pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank", "idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid points.loc[idx,'lat'] = centroid.y points.loc[idx,'lon'] = centroid.x", "XleftOrigin = xmin XrightOrigin = xmin + low_resolution YtopOrigin = ymax YbottomOrigin =", "newidx for bgidx in block_groups_gdf_crs.index: pct_carfree, pct_onecar, pct_twopluscars = summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E'])", "blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then", "Ytop - low_resolution Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin + low_resolution", "block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and block groups blocks_gdf_crs =", "city_crs = 32712 blocks_gdf_crs = gpd.read_file('prep_pop/tabblock2010_04_pophu.zip').to_crs(city_crs) block_groups_gdf_crs = 
gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon", "0 households have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar", "for j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin,", "not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop", "cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs)", "pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = bounds_poly_crs.bounds", "low_resolution)) XleftOrigin = xmin XrightOrigin = xmin + low_resolution YtopOrigin = ymax YbottomOrigin", "point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res = 250 #m to a", "pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] =", "total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree", "block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000,", "ownership bg_pieces = maup.intersections(block_groups, grid) bg_weights = grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0)", "def build_grid(bounds_poly_crs, low_resolution, exception_gdf_crs=None, high_resolution=None): xmin,ymin,xmax,ymax = 
bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas)", "= 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs],", "required for MAUP: https://github.com/geopandas/geopandas/issues/2199 gpd.options.use_pygeos = False import pandas as pd import numpy", "= point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union exception_gdf_crs = gpd.GeoDataFrame(geometry = [poly_crs], crs=city_crs) high_res =", ") grid_pop_gdf_crs['pop_dens'] = grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for", "= maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and block", "= Ytop - low_resolution Ybottom = Ybottom - low_resolution XleftOrigin = XleftOrigin +", "Ybottom)]) cell = cell.intersection(bounds_poly_crs) if exception_gdf_crs is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell)", "[ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is", ") return grid #clip blocks and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs", "polygon #this is the are within which the grid will be higher-resolution point", "pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) /", "+ int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars =", "block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx in", "if exception_gdf_crs is not None: for exception_cell in 
exception_cells: highres_cells += build_grid(exception_cell, high_resolution)", "= ymax - low_resolution lowres_cells = [] exception_cells = [] for i in", "= centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx,", "int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E']) + int(row['B25044_011E'])) / total_pop pct_twopluscars = 1", "# thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin)", "need to edit values here for new city deployment data_source = \"census\" #\"census\"", "summarize_veh_avail(row): total_pop = int(row['B25044_001E']) if total_pop < 1: return 0,0,1 #if no population,", "blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum() blocks_weights = maup.normalize(blocks_weights, level=0)", "veh_avail and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for", "Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326) point_crs = point_latlon.to_crs(city_crs) poly_crs = point_crs.buffer(1000).unary_union", "thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) /", "= xmin XrightOrigin = xmin + low_resolution YtopOrigin = ymax YbottomOrigin = ymax", "#m to a side # END INTRO actual code def summarize_veh_avail(row): total_pop =", "bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and block groups blocks_gdf_crs", "low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin = xmin +", "= 
build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon = grid_gdf_crs.to_crs(4326) grid_pop_gdf_crs", "grid_pop_gdf_crs['POP10'] / grid_pop_gdf_crs.geometry.area grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index:", "have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar = (int(row['B25044_004E'])", "grid['POP10'].groupby(maup.assign(grid, bg_pieces)).sum() bg_weights = maup.normalize(bg_weights, level=0) columns = ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate(", "= pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id']", "grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs = 4326)", "= maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for car ownership bg_pieces", "low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs is not", "import os #INTRO - need to edit values here for new city deployment", "blocks['POP10'], weights=blocks_weights, ) #then block groups for car ownership bg_pieces = maup.intersections(block_groups, grid)", "households have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) / total_pop pct_onecar =", "- low_resolution lowres_cells = [] exception_cells = [] for i in range(cols): Ytop", "YbottomOrigin = ymax - low_resolution lowres_cells = [] exception_cells = [] for i", "for new city deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs = 32712", "for idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx", "= 
XrightOrigin + low_resolution highres_cells = [] if exception_gdf_crs is not None: for", "first, for simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks, blocks_pieces)).sum()", "= gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same", "j in range(rows): cell = Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)])", "lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom = Ybottom - low_resolution XleftOrigin =", "centroid.y points.loc[idx,'lon'] = centroid.x for col in ['POP10','pct_carfree','pct_onecar','pct_twopluscars','pop_dens']: points.loc[idx, col] = grid_pop_gdf_latlon.loc[idx, col]", "= pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells, crs=city_crs) grid_gdf_latlon", "XleftOrigin = XleftOrigin + low_resolution XrightOrigin = XrightOrigin + low_resolution highres_cells = []", "values here for new city deployment data_source = \"census\" #\"census\" or \"ghsl\" city_crs", "gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326)", "xmin,ymin,xmax,ymax = bounds_poly_crs.bounds # thank you Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution))", "cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop = Ytop - low_resolution Ybottom =", "= [] if exception_gdf_crs is not None: for exception_cell in exception_cells: highres_cells +=", "blocks and block 
groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign", "float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars", "shapely.geometry from shapely.geometry import Polygon, Point from tqdm import tqdm import maup import", "level=0) grid['POP10'] = maup.prorate( blocks_pieces, blocks['POP10'], weights=blocks_weights, ) #then block groups for car", "import shapely.geometry from shapely.geometry import Polygon, Point from tqdm import tqdm import maup", "grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid = grid_pop_gdf_latlon.loc[idx,'geometry'].centroid", "= ['pct_carfree', 'pct_onecar','pct_twopluscars'] grid[columns] = maup.prorate( bg_pieces, block_groups[columns], weights=bg_weights, aggregate_by='mean', ) return grid", "block_groups_gdf_crs = gpd.read_file('prep_pop/tl_2019_04_bg.zip').to_crs(city_crs) veh_avail = pd.read_csv('prep_pop/B25044.csv').iloc[1:,] bounds_gdf_latlon = gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs", "the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID newidx = [] for bgidx in veh_avail.GEO_ID:", "int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin = xmin XrightOrigin = xmin + low_resolution YtopOrigin =", "idx in grid_pop_gdf_latlon.index: if not np.isnan(grid_pop_gdf_latlon.loc[idx,'POP10']): grid_pop_gdf_latlon.loc[idx,'id'] = idx points.loc[idx,'id'] = idx centroid", "exception_gdf_crs is not None: for exception_cell in exception_cells: highres_cells += build_grid(exception_cell, high_resolution) return", "/ grid_pop_gdf_crs.geometry.area 
grid_pop_gdf_latlon = grid_pop_gdf_crs.to_crs(4326) points = pd.DataFrame() for idx in grid_pop_gdf_latlon.index: if", "assume all 0 households have 2 cars pct_carfree = (int(row['B25044_003E']) + int(row['B25044_010E'])) /", "weights=bg_weights, aggregate_by='mean', ) return grid #clip blocks and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs,", "+ int(row['B25044_011E'])) / total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar return pct_carfree,", "blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the", "gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception polygon", "low_resolution YtopOrigin = ymax YbottomOrigin = ymax - low_resolution lowres_cells = [] exception_cells", "#m to a side low_res = 1000 #m to a side # END", "#blocks first, for simple population blocks_pieces = maup.intersections(blocks, grid, area_cutoff=0) blocks_weights = blocks['POP10'].groupby(maup.assign(blocks,", "- need to edit values here for new city deployment data_source = \"census\"", "is not None: if not cell.intersects(exception_gdf_crs.unary_union): lowres_cells.append(cell) else: exception_cells.append(cell) else: lowres_cells.append(cell) Ytop =", "the grid will be higher-resolution point = Point(-71.411479,41.823544) point_latlon = gpd.GeoDataFrame(geometry=[point], crs =", "total_pop pct_twopluscars = 1 - pct_carfree - pct_onecar return pct_carfree, pct_onecar, pct_twopluscars def", "[] if exception_gdf_crs is not None: for exception_cell in exception_cells: highres_cells += build_grid(exception_cell,", "pct_onecar block_groups_gdf_crs.loc[bgidx,'pct_twopluscars'] = pct_twopluscars grid_cells = build_grid(bounds_gdf_crs.unary_union, 1000, exception_gdf_crs, 250) grid_gdf_crs = gpd.GeoDataFrame(geometry=grid_cells,", 
"summarize_veh_avail(veh_avail.loc[bgidx]) total_pop = float(veh_avail.loc[bgidx,'B25044_001E']) block_groups_gdf_crs.loc[bgidx,'total_pop'] = total_pop block_groups_gdf_crs.loc[bgidx,'pct_carfree'] = pct_carfree block_groups_gdf_crs.loc[bgidx,'pct_onecar'] = pct_onecar", "= Polygon([(XleftOrigin, Ytop), (XrightOrigin, Ytop), (XrightOrigin, Ybottom), (XleftOrigin, Ybottom)]) cell = cell.intersection(bounds_poly_crs) if", "edit values here for new city deployment data_source = \"census\" #\"census\" or \"ghsl\"", "= gpd.GeoDataFrame(geometry = [ shapely.geometry.box(-111.124649,32.059300,-110.690002,32.366043)], crs = 4326) bounds_gdf_crs = bounds_gdf_latlon.to_crs(city_crs) #define exception", "to edit values here for new city deployment data_source = \"census\" #\"census\" or", "highres_cells = [] if exception_gdf_crs is not None: for exception_cell in exception_cells: highres_cells", "in range(cols): Ytop = YtopOrigin Ybottom = YbottomOrigin for j in range(rows): cell", "grid #clip blocks and block groups blocks_gdf_crs = gpd.clip(blocks_gdf_crs, bounds_gdf_crs) block_groups_gdf_crs = gpd.clip(block_groups_gdf_crs,", "= [] for bgidx in veh_avail.GEO_ID: newidx.append(bgidx[9:]) veh_avail.index = newidx for bgidx in", "exception polygon #this is the are within which the grid will be higher-resolution", "= gpd.clip(block_groups_gdf_crs, bounds_gdf_crs) #assign veh_avail and block_groups the same index block_groups_gdf_crs.index = block_groups_gdf_crs.GEOID", "= bounds_gdf_latlon.to_crs(city_crs) #define exception polygon #this is the are within which the grid", "Faraz (https://gis.stackexchange.com/questions/269243/creating-polygon-grid-using-geopandas) rows = int(np.ceil((ymax-ymin) / low_resolution)) cols = int(np.ceil((xmax-xmin) / low_resolution)) XleftOrigin" ]
[ "print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to 4", "<reponame>mnishitha/INF502-Fall2020 list_of_numbers = [1,2,3,4,5] len(list_of_numbers) list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it", "= list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1 = [-1,4,0,9,2,7] list1.sort() print", "print (piece) piece = list_of_numbers[2:6] #from position 2 to 6 print (piece) #shrinking", "list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to", "print(list_of_numbers) #merging list1 = [1,2,3] list2 = [4,5,6] list3 = list1 + list2", "list3 = list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1 = [-1,4,0,9,2,7] list1.sort()", "to 4 print (piece) piece = list_of_numbers[2:6] #from position 2 to 6 print", "piece = list_of_numbers[2:6] #from position 2 to 6 print (piece) #shrinking it del", "#shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2 = [4,5,6]", "(piece) piece = list_of_numbers[2:6] #from position 2 to 6 print (piece) #shrinking it", "= [1,2,3] list2 = [4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2) print(list1)", "list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2 = [4,5,6] list3 = list1", "#merging list1 = [1,2,3] list2 = [4,5,6] list3 = list1 + list2 print(list3)", "list_of_numbers[2:6] #from position 2 to 6 print (piece) #shrinking it del list_of_numbers [2:5]", "#extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to 4 print", "4 print (piece) piece = list_of_numbers[2:6] #from position 2 to 6 print (piece)", "it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = 
[1,2,3] list2 = [4,5,6] list3", "len(list_of_numbers) list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it", "to 6 print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 =", "list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece", "piece = list_of_numbers[:4] #beginning to 4 print (piece) piece = list_of_numbers[2:6] #from position", "= list_of_numbers[:4] #beginning to 4 print (piece) piece = list_of_numbers[2:6] #from position 2", "6 print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3]", "[1,2,3,4,5] len(list_of_numbers) list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing", "#slicing it piece = list_of_numbers[:4] #beginning to 4 print (piece) piece = list_of_numbers[2:6]", "list1 = [1,2,3] list2 = [4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2)", "#from position 2 to 6 print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers)", "2 to 6 print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1", "= [1,2,3,4,5] len(list_of_numbers) list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers)", "it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to 4 print (piece)", "list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1 = [-1,4,0,9,2,7] 
list1.sort() print (list1)", "list_of_numbers[:4] #beginning to 4 print (piece) piece = list_of_numbers[2:6] #from position 2 to", "[2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2 = [4,5,6] list3 = list1 +", "= list_of_numbers[2:6] #from position 2 to 6 print (piece) #shrinking it del list_of_numbers", "list_of_numbers = [1,2,3,4,5] len(list_of_numbers) list_of_numbers[0] print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8])", "print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning", "print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2", "[1,2,3] list2 = [4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting", "it piece = list_of_numbers[:4] #beginning to 4 print (piece) piece = list_of_numbers[2:6] #from", "list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to 4 print (piece) piece", "(piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2 =", "= [4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1 =", "list2 = [4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1", "[4,5,6] list3 = list1 + list2 print(list3) list1.extend(list2) print(list1) #sorting list1 = [-1,4,0,9,2,7]", "#beginning to 4 print (piece) piece = list_of_numbers[2:6] #from position 2 to 6", "del list_of_numbers [2:5] print(list_of_numbers) #merging list1 = [1,2,3] list2 = [4,5,6] list3 =", "print(list_of_numbers) list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece =", 
"list_of_numbers[4] print(list_of_numbers) list_of_numbers[-2] print(list_of_numbers) #extending it list_of_numbers.extend([6,7,8]) print(list_of_numbers) #slicing it piece = list_of_numbers[:4]", "position 2 to 6 print (piece) #shrinking it del list_of_numbers [2:5] print(list_of_numbers) #merging", "print(list_of_numbers) #slicing it piece = list_of_numbers[:4] #beginning to 4 print (piece) piece =" ]
[ "MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on", "user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice ==", "choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\":", "(request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method", "== \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice ==", "has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if", "in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return", "== \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice ==", "print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ):", "elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True elif", "permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return True", "obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice", "= ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user", "'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = 
['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def", "print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice == \"TOP\": print('Top')", "import permissions BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class", "TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self,", "BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage", "\"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in", "permissions based on user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--')", "\"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice == \"TOP\":", "obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS", "= ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions", "['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self, request, view,", "view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice 
== \"BASE\": print('base') if (request.method in", "return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return", "obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice", "True elif obj.user.user_choice == \"TOP\": print('Top') if (request.method in TOP_SAFE_METHODS ): return True", "= ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self, request,", "if (request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if", "request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base') if (request.method", "MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice == \"TOP\": print('Top') if (request.method in TOP_SAFE_METHODS", "['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based", "rest_framework import permissions BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS']", "in BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in", "from rest_framework import permissions BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS =", "class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self, request, view, obj):", "on user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in 
permissions.SAFE_METHODS,'--') if obj.user.user_choice", "def has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if obj.user.user_choice == \"BASE\": print('base')", "(request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice == \"TOP\": print('Top') if (request.method", "Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method", "['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission): \"\"\"manage permissions based on user choice\"\"\"", "based on user choice\"\"\" def has_object_permission(self, request, view, obj): print(request.method in permissions.SAFE_METHODS,'--') if", "BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS", "if (request.method in MEDIUM_SAFE_METHODS ): return True elif obj.user.user_choice == \"TOP\": print('Top') if", "permissions BASE_SAFE_METHODS = ['GET','HEAD','OPTIONS'] MEDIUM_SAFE_METHODS = ['GET', 'PUT','PATCH','HEAD','OPTIONS'] TOP_SAFE_METHODS = ['GET','DELETE','PUT','PATCH','HEAD','OPTIONS'] class Permission(permissions.BasePermission):", "\"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\":", "): return True elif obj.user.user_choice == \"TOP\": print('Top') if (request.method in TOP_SAFE_METHODS ):", "return True elif obj.user.user_choice == \"TOP\": print('Top') if (request.method in TOP_SAFE_METHODS ): return", "print('base') if (request.method in BASE_SAFE_METHODS ): return True elif obj.user.user_choice == \"MEDIUM\": print('medium')", "True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ): return True", "in MEDIUM_SAFE_METHODS ): return True elif 
obj.user.user_choice == \"TOP\": print('Top') if (request.method in", "): return True elif obj.user.user_choice == \"MEDIUM\": print('medium') if (request.method in MEDIUM_SAFE_METHODS ):", "if obj.user.user_choice == \"BASE\": print('base') if (request.method in BASE_SAFE_METHODS ): return True elif" ]
[ "current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read in", "reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end +", "+ parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()):", "project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample or", "+ '_' + parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt')", "import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample", ") * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads,", "parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output of", "parser2.parse_reference_output() # Import sequence data for selected sequence reads print('Reading FASTQ file') (parser1,", "file and save current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling", "run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser):", "== 3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads:", "end reads created') make_functions_chart(parser1) print('Krona chart for forward end reads created') result[ENDS[0]] =", "reads') result[ENDS[1]] = {} return result def main(): \"\"\"Main 
function\"\"\" print('This program is", "int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads", "'--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_'", "current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2,", "metric = 'readcount' # Generate output for all samples for sample_id in project.list_samples():", "search for reverse end reads finished') parser2.parse_background_output() print('Classification DB search results for reverse", "os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.background_output_name )", "export_sample from fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command):", ") if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount", "gzipped FASTQ file, finds sequences of selected reads and stores them Returns: read_count", "run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample or delete sample from", "and save current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline", "line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For", "(read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD} else: # No hits found", "print('Pre-selection search did not find any hits for reverse 
end reads') result[ENDS[1]] =", "if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for", "if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence =", "import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND", "finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser object", "but sequence library # must have more reads as some reads are always", "total number of bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter", "in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality +", "FASTQ entry has exactly four lines line_counter += 1 if line_counter == 5:", "if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search did not", "report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds", "sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor ==", "= sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000", "None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle =", "2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = 
sample.fastq_fwd_readcount - 1000000 else: args['nreads'] =", "report for reverse end reads created') make_functions_chart(parser2) print('Krona chart for reverse end reads", "samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output for the", "run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file,", "read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read in parser2.reads: parser2.reads[current_read].pe_id", "if current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 =", "reverse end reads created') result[ENDS[1]] = {read_id: read for (read_id, read) in parser2.reads.items()", "command (str): either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args", "fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline, report_results", "current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence = line", "gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import", "for sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples():", "'readcount' # Generate output for all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id,", "either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path,", "file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, 
parser2 ) if", "args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount -", "parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch',", "base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else:", "Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for", "), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6',", "None for sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id in", "'--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend',", "not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name", "if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' +", "finds sequences of selected reads and stores them Returns: read_count (int): number of", "for forward end reads exported in FASTQ format') parser1.export_hit_list() print('List of hits fo", "end_identifier=None): \"\"\"Functional profiling pipeline for entire project Args: project (:obj:Project): current project sample_identifier", "'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue',", "\"\"\" args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] =", "5: line_counter = 1 line = 
line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 +=", "os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name )", "sample_id in project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue sample = Sample(sample_id)", "1500000: # MicrobeCensus subsamples 2M reads by default, but sequence library # must", "+ parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output of reference DB search", "format') generate_fastq_report(parser2) print('Text report for reverse end reads created') generate_pdf_report(parser2) print('PDF report for", "in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif", "search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected sequence reads print('Reading FASTQ", "os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name )", "+ '_' + parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') #", "options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1])", "), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.background_output_name", "'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args:", "'_' + parser.options.background_output_name ), 
'--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff(", "and stores them Returns: read_count (int): number of reads in the file base_count", "else: infile_handle = open(fastq_file1, 'rb') for line in infile_handle: # count lines as", "for reverse end reads created') make_functions_chart(parser2) print('Krona chart for reverse end reads created')", "parser2.end) line_counter = 0 read_count2 = 0 base_count2 = 0 current_read = None", "+ '_' + parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if", "parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality =", "= base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0:", "lines line_counter += 1 if line_counter == 5: line_counter = 1 line =", "output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected", "parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write", "running for only one sample project.generate_report() # Rename existing project file and save", "= read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0:", "reads created') generate_pdf_report(parser1) print('PDF report for forward end reads created') make_functions_chart(parser1) print('Krona chart", "line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read", "generate_pdf_report(parser1) print('PDF report for forward end reads created') make_functions_chart(parser1) print('Krona chart for forward", "in project.list_samples(): if 
project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric = 'erpkg'", "exported in JSON format') generate_fastq_report(parser2) print('Text report for reverse end reads created') generate_pdf_report(parser2)", "+ parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_'", "= line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0", "if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search", "parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx')", "2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads", "base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end reads for pre-selected", "not find any hits for forward end reads') result[ENDS[0]] = {} if parser2.reads:", "default, but sequence library # must have more reads as some reads are", "import Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report", "for reverse end reads created') generate_pdf_report(parser2) print('PDF report for reverse end reads created')", "end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): 
os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) #", "all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output for", "parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter", "print('Paired reads for classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads", "= line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line)", "project if sample_identifier is None: # Skip project report if the pipeline is", "# Generate output for the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric", "created') result[ENDS[1]] = {read_id: read for (read_id, read) in parser2.reads.items() if read.status ==", "args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile']", "generate_sample_report(project, sample_id, metric=metric) # Generate output for the project if sample_identifier is None:", "= read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads:", "them Returns: read_count (int): number of reads in the file base_count (int): total", "configuration object \"\"\" args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else:", "== 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if 
sample.rpkg_scaling_factor ==", "if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate output for all samples", "= parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile", "if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not", "parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id),", "(int): number of reads in the file base_count (int): total number of bases", "parser (:obj:DiamondParser): parser object processing an input sequence file command (str): either 'blastx'", "def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser object processing", "parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_output_name", "sequence file command (str): either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting", "\"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\"", "output for all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate", "if sample_identifier and sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] =", "= None for sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id", "in parser2.reads: parser2.reads[current_read].pe_quality = line 
infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0", "= line if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3:", "parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6',", "'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample", "print('Paired reads for classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward end reads", "parser1.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search did", "current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line", "for the project if sample_identifier is None: # Skip project report if the", "line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality = line if", "'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs", "end reads exported in FASTQ format') parser1.export_hit_list() print('List of hits fo forward end", "print('List of hits fo forward end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id),", "3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3", "fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire project Args: project (:obj:Project): current", 
"parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',", "(str, optional): sample identifier end_identifier (str, optional): end identifier \"\"\" for sample_id in", "from fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs", "line if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3: if", "run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report,", "== 2: base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line if", "0 current_read = None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb')", "# Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data", "end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON format') generate_fastq_report(parser2)", "delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in project.list_samples(): if", "export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward end", "+ '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for", "function\"\"\" print('This program is not intended to run directly.') if __name__ == '__main__':", "if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read 
in parser2.reads: parser2.reads[current_read].pe_id =", "# No hits found print('Pre-selection search did not find any hits for reverse", "'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def", ") / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid',", "(read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id =", "current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line", "current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line", "results for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported in", "parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' +", "parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter", "in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end", "file base_count (int): total number of bases in all reads \"\"\" fastq_file1 =", "options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), 
exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id),", "def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire project Args: project (:obj:Project):", "if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if current_read", "parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0 base_count1 = 0 current_read =", "else: infile_handle = open(fastq_file2, 'rb') for line in infile_handle: # count lines as", "parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2,", "current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality = line", "'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command,", "(:obj:ProgramConfig): program configuration object \"\"\" args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path,", "= line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\"", "for classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward end reads exported in", "est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences", "FASTQ file, finds sequences of selected reads and stores them Returns: read_count (int):", "= 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount'", "'100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * 
parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id)", "'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser", "forward end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' +", "= line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line", "result[ENDS[1]] = {} return result def main(): \"\"\"Main function\"\"\" print('This program is not", "object processing an input sequence file command (str): either 'blastx' or 'blastp' (see", "project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg'", "run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq()", "def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single FASTQ file processing Args: project", "FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()):", "= \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample or delete", "in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate output for", "# Generate output for the project if sample_identifier is None: # Skip project", "line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1", "ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = 
DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not", "current project sample_identifier (str, optional): sample identifier end_identifier (str, optional): end identifier \"\"\"", "parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3", "hits for reverse end reads') result[ENDS[1]] = {} return result def main(): \"\"\"Main", "= gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb') for line in infile_handle: #", "'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args:", "fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end + '_' + parser.options.pe_reads_fastq_name +", "parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return", "fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from", "print('List of hits for reverse end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id),", "if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter", "+ '_' + parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification", "fama.utils.utils import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report", "identifier end_identifier (str, optional): end identifier \"\"\" 
for sample_id in project.list_samples(): if sample_identifier", "infile_handle = open(fastq_file2, 'rb') for line in infile_handle: # count lines as each", "parser1.export_read_fastq() print('Classified forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for", "+ '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional", "[sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory,", "if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric = 'erpkg' for sample_id", "print('Text report for reverse end reads created') generate_pdf_report(parser2) print('PDF report for reverse end", "in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif", "of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected sequence", "for reverse end reads') result[ENDS[1]] = {} return result def main(): \"\"\"Main function\"\"\"", "(read_id, read) in parser1.reads.items() if read.status == STATUS_GOOD} else: # No hits found", "if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1, read_count2,", "= {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config,", "'_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in", "reads exported') export_annotated_reads(parser1) print('Classified 
forward end reads exported in JSON format') generate_fastq_report(parser1) print('Text", "base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in", "if current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence =", "end reads') result[ENDS[1]] = {} return result def main(): \"\"\"Main function\"\"\" print('This program", "= line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2:", "line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1 (read_id, _) =", "did not find any hits for forward end reads') result[ENDS[0]] = {} if", "end reads created') result[ENDS[0]] = {read_id: read for (read_id, read) in parser1.reads.items() if", "optional): end identifier \"\"\" for sample_id in project.list_samples(): if sample_identifier and sample_identifier !=", "in parser2.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search", "reverse end reads exported in JSON format') generate_fastq_report(parser2) print('Text report for reverse end", "for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output for the project", "1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line", "if the pipeline is running for only one sample project.generate_report() # Rename existing", "fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser):", "current sample \"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample,", "if sample.fastq_rev_readcount == 0: 
sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount =", "read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' +", "parser2.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search did", "'--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_output_name ),", "format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward", "read_count (int): number of reads in the file base_count (int): total number of", "parser.sample.sample_id + '_' + parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile,", "in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1 += len(line) if", "parser2.export_hit_fastq() print('Hits for reverse end reads exported in FASTQ format') parser2.export_hit_list() print('List of", "parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue',", "{} return result def main(): \"\"\"Main function\"\"\" print('This program is not intended to", "command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end", "from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report", "end reads created') make_functions_chart(parser2) print('Krona chart for reverse end reads created') result[ENDS[1]] =", "_) = parse_fastq_seqid(line) current_read = read_id if current_read in 
parser1.reads: parser1.reads[current_read].pe_id = line", "for forward end reads created') generate_pdf_report(parser1) print('PDF report for forward end reads created')", "forward end reads exported in FASTQ format') parser1.export_hit_list() print('List of hits fo forward", "'_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse end", "'wt') as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n')", "print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2", "base_count (int): total number of bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id,", "args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt')", "parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2", "single FASTQ file processing Args: project (:obj:Project): current project sample (:obj:Sample): current sample", "sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args = {}", "base_count1, base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount =", "project Args: project (:obj:Project): current project sample_identifier (str, optional): sample identifier end_identifier (str,", "search Args: parser (:obj:DiamondParser): parser object processing an input sequence file command (str):", "profiling pipeline for single FASTQ file processing Args: project (:obj:Project): current project sample", "manual) \"\"\" print('Starting DIAMOND') 
diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query',", "(int): total number of bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end)", "parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' +", "), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_hits_fastq_name", "reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON format') generate_fastq_report(parser2) print('Text", "= DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data,", "(str): either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args =", "print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config", "bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1", "created') make_functions_chart(parser2) print('Krona chart for reverse end reads created') result[ENDS[1]] = {read_id: read", "in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close()", "functional profiling pipeline\"\"\" import os import gzip from fama.utils.const import ENDS, STATUS_GOOD from", "the project if sample_identifier is None: # Skip project report 
if the pipeline", "os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out',", "each FASTQ entry has exactly four lines line_counter += 1 if line_counter ==", "sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond']", "0 current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle =", "parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx')", "subsamples 2M reads by default, but sequence library # must have more reads", "import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import", "reads exported in JSON format') generate_fastq_report(parser2) print('Text report for reverse end reads created')", "'.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n')", "reverse end reads created') make_functions_chart(parser2) print('Krona chart for reverse end reads created') result[ENDS[1]]", "sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args = {} if sample.is_paired_end:", "exported in FASTQ format') parser1.export_hit_list() print('List of hits fo forward end reads exported')", "parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 =", "args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples", "parser2.export_hit_list() print('List of hits for reverse end reads exported') if not os.path.exists( os.path.join(", "line_counter += 1 
if line_counter == 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r')", "ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import", "run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search", "parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search for", "\\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate", "base_count1 = 0 current_read = None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle =", "if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' +", "1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1 (read_id, _)", "with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence", "parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads',", "read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir,", "= parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0 base_count1 = 0 current_read", "fama.project.sample import Sample from 
fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from", "Args: parser (:obj:DiamondParser): parser object processing an input sequence file command (str): either", "in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif", "project.generate_report() # Rename existing project file and save current version project.save_project_options() return project", "- 1000000 else: args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags,", "export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser,", "+ '_' + parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str(", "stores them Returns: read_count (int): number of reads in the file base_count (int):", "if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount =", "_) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line", "+ parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )),", "sample project.generate_report() # Rename existing project file and save current version project.save_project_options() return", "all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0", "in FASTQ format') parser1.export_hit_list() print('List of hits fo forward end reads exported') if", "end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, 
sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the", "os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id", "parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 =", "not find any hits for reverse end reads') result[ENDS[1]] = {} return result", "parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx')", "end identifier \"\"\" for sample_id in project.list_samples(): if sample_identifier and sample_identifier != sample_id:", "config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] =", "= None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb')", "DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input sequence file command", "outdir = parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id", "parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join(", "= read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0:", "current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1,", "from memory project.options.set_sample_data(project.samples[sample_id]) metric = 
None for sample_id in project.list_samples(): if project.is_paired_end(): metric", "< 1500000: # MicrobeCensus subsamples 2M reads by default, but sequence library #", "base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end reads for", "report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args:", "reads created') make_functions_chart(parser1) print('Krona chart for forward end reads created') result[ENDS[0]] = {read_id:", "as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3", "end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end", "sample (:obj:Sample): current sample \"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data,", "config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount", "'_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output of reference DB", "run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single FASTQ file processing Args: project (:obj:Project):", "4: if current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality", "read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search did not find", "None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for", "= read_id if current_read in parser1.reads: 
parser1.reads[current_read].read_id_line = line if current_read in parser2.reads:", "= 0 base_count1 = 0 current_read = None infile_handle = None if fastq_file1.endswith('.gz'):", "): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end", "'_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end +", "args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True", "exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON format') generate_fastq_report(parser2) print('Text report", "base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount", "\"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2", "for single FASTQ file processing Args: project (:obj:Project): current project sample (:obj:Sample): current", "== 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if", "reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end +", "if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported in FASTQ format') parser1.export_hit_list()", "sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe(", "= line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 += 1 (read_id, 
_) = parse_fastq_seqid(line)", "'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser,", "only one sample project.generate_report() # Rename existing project file and save current version", "sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)):", "os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name ) ):", "\"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id,", "reads exported in FASTQ format') parser2.export_hit_list() print('List of hits for reverse end reads", "generate_fastq_report(parser1) print('Text report for forward end reads created') generate_pdf_report(parser1) print('PDF report for forward", "for forward end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse", "parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount ==", "DB search results for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end reads", "[parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id", "end reads exported') export_annotated_reads(parser1) 
print('Classified forward end reads exported in JSON format') generate_fastq_report(parser1)", "parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx')", "forward end reads created') generate_pdf_report(parser1) print('PDF report for forward end reads created') make_functions_chart(parser1)", "parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1 += len(line) if current_read in", "line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if", "'blastx') # Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence", "reads in the file base_count (int): total number of bases in all reads", "'--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.background_output_name ),", "Args: project (:obj:Project): current project sample_identifier (str, optional): sample identifier end_identifier (str, optional):", "= os.path.join(outdir, parser.sample.sample_id + '_' + parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz')", "fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from", "parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser):", "run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed", "if project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0:", "'send', 'evalue', 'bitscore'] run_external_program(diamond_args) 
print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search", "= 0 read_count1 = 0 base_count1 = 0 current_read = None infile_handle =", "reads for classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward end reads exported", "paired-end reads for pre-selected reads into a separate FASTQ file \"\"\" outdir =", "= {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id", "+ '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output()", "are always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount <", "{} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id +", "FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2 )", "file, finds sequences of selected reads and stores them Returns: read_count (int): number", "in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads exported') export_annotated_reads(parser1)", "exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads exported')", "parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' +", "read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end reads", "sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples(): if", "1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 
+= 1 (read_id, _)", "parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter", "more reads as some reads are always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount", "metric = 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric =", "continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\", "0 read_count2 = 0 base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle", "line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2", "parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object", "sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount ==", "infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for line in infile_handle:", "'_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id", "= line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4:", "project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric = 'erpkg' for", "parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1,", "# count lines as each FASTQ entry has exactly four lines line_counter +=", ") ): run_bgr_search(parser2, 'blastx') 
print('Classification DB search for reverse end reads finished') parser2.parse_background_output()", "== 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if", "Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args =", "\"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input sequence", "command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id +", "config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported in", "parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join(", "search finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified forward end reads", "== 1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if", "= 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle", "(see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id)", "= 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2):", "into a separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids = {} for", "in project.list_samples(): 
generate_sample_report(project, sample_id, metric=metric) # Generate output for the project if sample_identifier", "= {read_id: read for (read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD} else:", "None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences of", "'_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', #", "= 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1 (read_id,", "JSON format') generate_fastq_report(parser2) print('Text report for reverse end reads created') generate_pdf_report(parser2) print('PDF report", "project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor ==", "command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input", "4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality", "base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor()", "in parser1.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection search", "'50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid',", "number of bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter =", "make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from 
fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils", "read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor()", "'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND", "= parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if", "config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object", "= {} return result def main(): \"\"\"Main function\"\"\" print('This program is not intended", "= line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3:", "file processing Args: project (:obj:Project): current project sample (:obj:Sample): current sample \"\"\" result", "= DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if", "'_' + parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not", "sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric =", "not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name", "infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb') for line in infile_handle:", "metric = 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 
0.0: metric =", "parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter ==", "sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in", "imported') parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads", "DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import", "1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read", "0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads", "sequence reads, write paired-end reads for pre-selected reads into a separate FASTQ file", "created') generate_pdf_report(parser2) print('PDF report for reverse end reads created') make_functions_chart(parser2) print('Krona chart for", "write paired-end reads for pre-selected reads into a separate FASTQ file \"\"\" outdir", "taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))):", "in parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif", "for pre-selected reads into a separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids", "pipeline for single FASTQ file processing Args: project 
(:obj:Project): current project sample (:obj:Sample):", "export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end reads for pre-selected reads into", "(str, optional): end identifier \"\"\" for sample_id in project.list_samples(): if sample_identifier and sample_identifier", "= read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end + '_' +", "pre-selected reads into a separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids =", "read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end + '_'", "\"\"\"Main function\"\"\" print('This program is not intended to run directly.') if __name__ ==", "if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True", "2: base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read", "in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if current_read in parser1.reads:", "for the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for", "Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier)", "+ parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id)", "report for reverse end reads created') generate_pdf_report(parser2) print('PDF report for reverse end reads", "for only one sample project.generate_report() # Rename existing project file and save 
current", "library # must have more reads as some reads are always discarded by", "print('PDF report for forward end reads created') make_functions_chart(parser1) print('Krona chart for forward end", "project sample (:obj:Sample): current sample \"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options,", "search did not find any hits for forward end reads') result[ENDS[0]] = {}", "(:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args = {} if", "'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified", "parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter ==", "(see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id)", "STATUS_GOOD} else: # No hits found print('Pre-selection search did not find any hits", "<reponame>aekazakov/FamaProfiling \"\"\"Runs Fama functional profiling pipeline\"\"\" import os import gzip from fama.utils.const import", "current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if current_read in", "for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ", "'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished')", "(:obj:Sample): current sample \"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data,", "+ '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()): 
outfile.write(parser.reads[read_id].pe_id +", "from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report", "parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount ==", "reads exported in JSON format') generate_fastq_report(parser1) print('Text report for forward end reads created')", "reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired", "reads finished') parser2.parse_background_output() print('Classification DB search results for reverse end reads imported') parser2.export_read_fastq()", "sample identifier end_identifier (str, optional): end identifier \"\"\" for sample_id in project.list_samples(): if", "project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric = 'erpkg' for sample_id in", "hits found print('Pre-selection search did not find any hits for forward end reads')", "project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output for the project if sample_identifier is", "+ parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search", "else: # No hits found print('Pre-selection search did not find any hits for", "sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project,", "hits for reverse end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id +", "version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling 
pipeline for single FASTQ", "'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus", "sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1", "count lines as each FASTQ entry has exactly four lines line_counter += 1", "len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence", "/ parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident',", "parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if current_read", "metric=metric) # Generate output for the project if sample_identifier is None: # Skip", "= {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose']", "did not find any hits for reverse end reads') result[ENDS[1]] = {} return", "run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser", "format') generate_fastq_report(parser1) print('Text report for forward end reads created') generate_pdf_report(parser1) print('PDF report for", "reads for classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported", "if line_counter == 1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read =", "= [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] =", "= 'readcount' # Generate output for all samples for sample_id 
in project.list_samples(): generate_sample_report(project,", "profiling pipeline for entire project Args: project (:obj:Project): current project sample_identifier (str, optional):", "paired-end sequence reads, write paired-end reads for pre-selected reads into a separate FASTQ", "# Search in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_'", "2M reads by default, but sequence library # must have more reads as", "line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if", "3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3", "Returns: read_count (int): number of reads in the file base_count (int): total number", "+ parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB", "uncompressed or gzipped FASTQ file, finds sequences of selected reads and stores them", "os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.background_output_name ) ):", "make_functions_chart(parser2) print('Krona chart for reverse end reads created') result[ENDS[1]] = {read_id: read for", "3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args) est_ags, args", "the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id", "+ '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse", "search did not find any hits for reverse end reads') result[ENDS[1]] = {}", "parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if 
sample.fastq_fwd_basecount == 0:", "// 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads']", "), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) /", "parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter ==", "= 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount'", "reads and stores them Returns: read_count (int): number of reads in the file", "No hits found print('Pre-selection search did not find any hits for reverse end", "len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence", "args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads)", "= int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M", "output for the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None", "print('This program is not intended to run directly.') if __name__ == '__main__': main()", "sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount ==", ") ): run_ref_search(parser2, 'blastx') # Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output()", "pipeline for entire project Args: project (:obj:Project): current project sample_identifier (str, optional): sample", "== 3: if 
current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads:", "exported in FASTQ format') parser2.export_hit_list() print('List of hits for reverse end reads exported')", "line if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1,", "= Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id],", "os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.background_output_name )", "'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs", "created') make_functions_chart(parser1) print('Krona chart for forward end reads created') result[ENDS[0]] = {read_id: read", "parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',", "'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_'", "sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args)", "reads into a separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids = {}", "= line if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4:", "= line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality = line", "Sample from fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from 
fama.output.pdf_report import", "): run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse end reads finished') parser2.parse_background_output() print('Classification", "generate_pdf_report(parser2) print('PDF report for reverse end reads created') make_functions_chart(parser2) print('Krona chart for reverse", "0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount", "one sample project.generate_report() # Rename existing project file and save current version project.save_project_options()", "in JSON format') generate_fastq_report(parser1) print('Text report for forward end reads created') generate_pdf_report(parser1) print('PDF", "parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1,", "reads created') make_functions_chart(parser2) print('Krona chart for reverse end reads created') result[ENDS[1]] = {read_id:", "sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for", "!= sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier]", "classified forward end reads exported') export_annotated_reads(parser1) print('Classified forward end reads exported in JSON", "reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ format')", "project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id]", "fama.output.json_util import export_annotated_reads, export_sample from 
fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid", "read_count1 = 0 base_count1 = 0 current_read = None infile_handle = None if", "sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else:", "fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb') for line in", "forward end reads created') make_functions_chart(parser1) print('Krona chart for forward end reads created') result[ENDS[0]]", "(:obj:Project): current project sample_identifier (str, optional): sample identifier end_identifier (str, optional): end identifier", "reads for pre-selected reads into a separate FASTQ file \"\"\" outdir = parser.sample.work_directory", "'--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end +", "parser1.end) line_counter = 0 read_count1 = 0 base_count1 = 0 current_read = None", "elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read", "= \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) #", "sequence library # must have more reads as some reads are always discarded", "project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric", "1000000 else: args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None)", 
"= os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000:", "sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n')", "+ parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads,", "'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus", "read_count2 = 0 base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle =", "'_' + parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size(", "fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0 base_count1 = 0", "+ parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id +", "'--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid',", "parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0 base_count2 = 0 current_read =", "line if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4: if", "os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if 
not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference", "generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import", "sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end +", "sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\", "end reads finished') parser2.parse_background_output() print('Classification DB search results for reverse end reads imported')", "import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser", "end_identifier (str, optional): end identifier \"\"\" for sample_id in project.list_samples(): if sample_identifier and", "for classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in", "5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 +=", "end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id),", "diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( 
parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id +", "DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out',", "chart for reverse end reads created') result[ENDS[1]] = {read_id: read for (read_id, read)", "generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads,", "diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join(", "else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir", "end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id])", "reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end +", "for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_'", "for reverse end reads finished') parser2.parse_background_output() print('Classification DB search results for reverse end", "+ '_' + parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id", "import DiamondParser from fama.output.report import 
generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer", "= [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path", "= 0 base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2,", "Fama functional profiling pipeline\"\"\" import os import gzip from fama.utils.const import ENDS, STATUS_GOOD", "# MicrobeCensus subsamples 2M reads by default, but sequence library # must have", "selected reads and stores them Returns: read_count (int): number of reads in the", "run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported", "parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end", "0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount", "== 0.0: metric = 'fragmentcount' else: metric = 'erpkg' for sample_id in project.list_samples():", "for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate", "if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if", "current project sample (:obj:Sample): current sample \"\"\" result = {} parser1 = DiamondParser(config=project.config,", "Import sequence data for selected sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1,", "os.path.join(outdir, 
parser.sample.sample_id + '_' + parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with", "os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs',", "sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported in FASTQ", "for entire project Args: project (:obj:Project): current project sample_identifier (str, optional): sample identifier", "project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported in FASTQ format')", "project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single FASTQ file", "for all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output", "exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_'", "= gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for line in infile_handle: #", "+ '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) *", "# must have more reads as some reads are always discarded by filtering", "0 base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb')", "\"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join(", "True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads by default, but", 
"metric = 'fragmentcount' else: metric = 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor", "JSON format') generate_fastq_report(parser1) print('Text report for forward end reads created') generate_pdf_report(parser1) print('PDF report", "line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read", "line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if", "report for forward end reads created') generate_pdf_report(parser1) print('PDF report for forward end reads", "'_' + parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB", "None: # Skip project report if the pipeline is running for only one", "fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from", "print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed", "output for the project if sample_identifier is None: # Skip project report if", "< 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args) est_ags,", "exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads exported')", "os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: #", "def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object processing", "analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args = {} if sample.is_paired_end: args['seqfiles']", "parser1.end + '_' + 
parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished')", "'\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling", "1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id", "reads, write paired-end reads for pre-selected reads into a separate FASTQ file \"\"\"", "import gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils", "line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end)", "'--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size(", "sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor", "entire project Args: project (:obj:Project): current project sample_identifier (str, optional): sample identifier end_identifier", "print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id),", "from fama.utils.utils import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import DiamondParser from", "DB search results imported') parser1.export_read_fastq() 
print('Classified forward end reads exported in FASTQ format')", "parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt',", "export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end", "def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences of selected", "pipeline\"\"\" import os import gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import", "an input sequence file command (str): either 'blastx' or 'blastp' (see DIAMOND manual)", "parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter ==", "outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 +", "outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def", "have more reads as some reads are always discarded by filtering args['nreads'] =", "must have more reads as some reads are always discarded by filtering args['nreads']", "in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads exported') export_annotated_reads(parser2)", "from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser", "finished') parser2.parse_background_output() print('Classification DB search results for reverse end reads imported') parser2.export_read_fastq() 
print('Classified", "sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id)))", "in parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close()", "forward end reads created') result[ENDS[0]] = {read_id: read for (read_id, read) in parser1.reads.items()", "parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs', '50',", "current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line", "parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', #", "hits found print('Pre-selection search did not find any hits for reverse end reads')", "Skip project report if the pipeline is running for only one sample project.generate_report()", "= line if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2:", "current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single", "parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.background_output_name ) ): run_bgr_search(parser2,", 
"taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if", "any hits for reverse end reads') result[ENDS[1]] = {} return result def main():", "of bases in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0", "= line elif line_counter == 2: base_count2 += len(line) if current_read in parser1.reads:", "if line_counter == 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter ==", "parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for", "1 if line_counter == 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter", "by default, but sequence library # must have more reads as some reads", "sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample or delete sample from memory", "has exactly four lines line_counter += 1 if line_counter == 5: line_counter =", ") ): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' +", "outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline", "sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits", "= config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = 
int(config.threads) args['no_equivs'] = True if", "= [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id),", "if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project,", "current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in", "filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount", "open(fastq_file1, 'rb') for line in infile_handle: # count lines as each FASTQ entry", "0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0:", "run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output", "Generate output for the project if sample_identifier is None: # Skip project report", "est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or", "finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified forward end reads exported", "for (read_id, read) in parser1.reads.items() if read.status == STATUS_GOOD} else: # No hits", "os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + 
parser.options.background_output_name ), '--max-target-seqs',", "created') result[ENDS[0]] = {read_id: read for (read_id, read) in parser1.reads.items() if read.status ==", "os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.background_output_name ) ):", "reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0 base_count1", "= run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ", "args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped", "line elif line_counter == 2: base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence", "'_' + parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id +", "by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] =", "def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program", "reverse end reads finished') parser2.parse_background_output() print('Classification DB search results for reverse end reads", "current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4: if current_read in", "result[ENDS[1]] = {read_id: read for (read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD}", "print('Hits for reverse end reads exported in FASTQ format') parser2.export_hit_list() print('List of hits", "{} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] =", "== 5: line_counter = 
1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2", "manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query',", "export_annotated_reads(parser1) print('Classified forward end reads exported in JSON format') generate_fastq_report(parser1) print('Text report for", "for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else: metric", "program configuration object \"\"\" args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path]", "\"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input sequence", "if sample_identifier is None: # Skip project report if the pipeline is running", "in infile_handle: # count lines as each FASTQ entry has exactly four lines", "= 0 current_read = None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1,", "reads are always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount", "(read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line =", "reads exported in FASTQ format') parser1.export_hit_list() print('List of hits fo forward end reads", "fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def run_ref_search(parser, command): \"\"\"Runs pre-selection", "parser2.parse_background_output() print('Classification DB search results for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse", "import os import gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline", 
"sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else:", "sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate output", "True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] =", "DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ),", "{} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported in FASTQ format')", "reads created') result[ENDS[0]] = {read_id: read for (read_id, read) in parser1.reads.items() if read.status", "args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path]", "forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward", "DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample,", "if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported in FASTQ format') parser2.export_hit_list()", "command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input", "MicrobeCensus subsamples 2M reads by default, but sequence library # must have more", "return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single FASTQ file processing", "reads exported in FASTQ 
format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads", "+ parser1.end + '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search", "print('Classification DB search results for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end", "read) in parser1.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection", "(:obj:Project): current project sample (:obj:Sample): current sample \"\"\" result = {} parser1 =", "for line in infile_handle: # count lines as each FASTQ entry has exactly", "find any hits for reverse end reads') result[ENDS[1]] = {} return result def", "parser1.reads: parser1.reads[current_read].read_id_line = line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter", "No hits found print('Pre-selection search did not find any hits for forward end", "parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def", "read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in", "not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.background_output_name", "+ parser1.end + '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists(", "= import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if", ") ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search results", "line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read in", 
"parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100',", "= sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args)", "from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline,", "outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire project", "fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for line in", "main(): \"\"\"Main function\"\"\" print('This program is not intended to run directly.') if __name__", "reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected sequence reads", "from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample", "in JSON format') generate_fastq_report(parser2) print('Text report for reverse end reads created') generate_pdf_report(parser2) print('PDF", "'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db(", "parser1.reads: parser1.export_hit_fastq() print('Hits for forward end reads exported in FASTQ format') parser1.export_hit_list() print('List", "[sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir']", "entry has exactly four lines line_counter += 1 if line_counter == 5: line_counter", "\"\"\"Functional profiling 
pipeline for single FASTQ file processing Args: project (:obj:Project): current project", "sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] = config.diamond_path args['data_dir'] =", "FASTQ format') parser2.export_hit_list() print('List of hits for reverse end reads exported') if not", "processing an input sequence file command (str): either 'blastx' or 'blastp' (see DIAMOND", "'rb') for line in infile_handle: # count lines as each FASTQ entry has", "{} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options,", "parser1.export_hit_fastq() print('Hits for forward end reads exported in FASTQ format') parser1.export_hit_list() print('List of", "line elif line_counter == 2: base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence", "Generate output for the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric =", "is running for only one sample project.generate_report() # Rename existing project file and", "lines as each FASTQ entry has exactly four lines line_counter += 1 if", "= None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle", "for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n')", "project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for single FASTQ file processing Args:", "+ '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output of reference", "if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = 
open(fastq_file1, 'rb') for line", "parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2 += len(line) if current_read", "is None: # Skip project report if the pipeline is running for only", "format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse", "reverse end reads created') generate_pdf_report(parser2) print('PDF report for reverse end reads created') make_functions_chart(parser2)", "elif line_counter == 2: base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence =", "== 0.0: metric = 'readcount' # Generate output for all samples for sample_id", "always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000:", "or gzipped FASTQ file, finds sequences of selected reads and stores them Returns:", "(:obj:DiamondParser): parser object processing an input sequence file command (str): either 'blastx' or", "search results imported') parser1.export_read_fastq() print('Classified forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1)", "parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' +", "Args: project (:obj:Project): current project sample (:obj:Sample): current sample \"\"\" result = {}", "infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end", "parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter", "of hits fo forward end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id", 
"For paired-end sequence reads, write paired-end reads for pre-selected reads into a separate", "existing project file and save current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample):", "print('Classified forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified", "'rb') else: infile_handle = open(fastq_file2, 'rb') for line in infile_handle: # count lines", "reverse end reads') result[ENDS[1]] = {} return result def main(): \"\"\"Main function\"\"\" print('This", "+= len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads:", "(parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount", "\"\"\"Functional profiling pipeline for entire project Args: project (:obj:Project): current project sample_identifier (str,", "generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census import", "exported') export_annotated_reads(parser1) print('Classified forward end reads exported in JSON format') generate_fastq_report(parser1) print('Text report", "sequences of selected reads and stores them Returns: read_count (int): number of reads", "parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id", "parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )", "database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), 
parser1.sample.sample_id + '_' + parser1.end + '_'", "'\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None,", "parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality", "export_sample(project.samples[sample_id]) # Generate output for the sample or delete sample from memory project.options.set_sample_data(project.samples[sample_id])", "sample): \"\"\"Functional profiling pipeline for single FASTQ file processing Args: project (:obj:Project): current", "project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads", "from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program", "if current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality =", "current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter =", "parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data,", "print('Classified forward end reads exported in JSON format') generate_fastq_report(parser1) print('Text report for forward", "[parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( 
parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' +", "for reverse end reads exported in FASTQ format') parser2.export_hit_list() print('List of hits for", "end reads exported in JSON format') generate_fastq_report(parser1) print('Text report for forward end reads", "infile_handle = open(fastq_file1, 'rb') for line in infile_handle: # count lines as each", "sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2", "sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward", "= None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb')", "if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample)", "sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier:", "print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end),", "reads by default, but sequence library # must have more reads as some", "'_' + parser1.end + '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB", "+ '_' + parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists( 
os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id),", "some reads are always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif", "\"\"\" outdir = parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] =", "for selected sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2)", "reverse end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' +", "0 read_count1 = 0 base_count1 = 0 current_read = None infile_handle = None", "or delete sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in project.list_samples():", "parser.sample.sample_id + '_' + parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue',", "'_' + parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process", "number of reads in the file base_count (int): total number of bases in", "0 base_count1 = 0 current_read = None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle", "current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3 = line", "else: metric = 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric", "== 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads:", "= config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs']", 
"parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search", "in the file base_count (int): total number of bases in all reads \"\"\"", "Generate output for all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) #", "line in infile_handle: # count lines as each FASTQ entry has exactly four", "or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db',", "forward end reads exported') export_annotated_reads(parser1) print('Classified forward end reads exported in JSON format')", "results imported') parser1.export_read_fastq() print('Classified forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired", "0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor", "run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end +", "infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1,", "parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_'", "= line if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2,", "parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected sequence reads print('Reading FASTQ file')", "line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = 
line if", "parser object processing an input sequence file command (str): either 'blastx' or 'blastp'", "= open(fastq_file1, 'rb') for line in infile_handle: # count lines as each FASTQ", "not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in", "profiling pipeline\"\"\" import os import gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline", "= 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 += 1 (read_id,", "line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in", "object \"\"\" args = {} if sample.is_paired_end: args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles']", "sample_identifier is None: # Skip project report if the pipeline is running for", "export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON format') generate_fastq_report(parser2) print('Text report for", "if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount =", "= parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0 base_count2 = 0 current_read", "import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util", "args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args) est_ags, args =", "sample_identifier=None, end_identifier=None): 
\"\"\"Functional profiling pipeline for entire project Args: project (:obj:Project): current project", "as each FASTQ entry has exactly four lines line_counter += 1 if line_counter", "read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0:", "MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program configuration object \"\"\" args", "os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database", "project sample_identifier (str, optional): sample identifier end_identifier (str, optional): end identifier \"\"\" for", "0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq()", "parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1 += len(line) if current_read", "memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in project.list_samples(): if project.is_paired_end(): metric =", "parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality", "# Generate output for all samples for sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric)", "'--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident',", "'rb') else: 
infile_handle = open(fastq_file1, 'rb') for line in infile_handle: # count lines", "== 1: read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if", "elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read", "sample_identifier (str, optional): sample identifier end_identifier (str, optional): end identifier \"\"\" for sample_id", "): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search results imported')", "args['no_equivs'] = True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads by", "exactly four lines line_counter += 1 if line_counter == 5: line_counter = 1", "DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import sequence data for selected sequence reads print('Reading", "reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads", "read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end + '_' + parser.options.pe_reads_fastq_name", "run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse end reads finished') parser2.parse_background_output() print('Classification DB", "parser1.reads[current_read].read_id_line = line if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter ==", "FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end reads exported') export_annotated_reads(parser1) print('Classified", "reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON format')", "args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def import_fastq_pe(parser1,", "project.list_samples(): if 
project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate output for all", "read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality", "reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe( parser1,", "read for (read_id, read) in parser1.reads.items() if read.status == STATUS_GOOD} else: # No", "if line_counter == 1: read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read =", "parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), #", "not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.background_output_name", "STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import Sample", "from fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import Sample from", "current_read = None infile_handle = None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else:", "found print('Pre-selection search did not find any hits for reverse end reads') result[ENDS[1]]", "if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4: if current_read", "import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser import", "config (:obj:ProgramConfig): program configuration object \"\"\" args = {} if sample.is_paired_end: 
args['seqfiles'] =", "sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads by default, but sequence library", "os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name ) ):", "# No hits found print('Pre-selection search did not find any hits for forward", "== 2: base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if", "outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None):", "current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read in parser2.reads: parser2.reads[current_read].pe_sequence = line", "in parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif", "infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0 base_count2 =", "fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0 base_count2 = 0", "result def main(): \"\"\"Main function\"\"\" print('This program is not intended to run directly.')", "in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor", "parser1.export_hit_list() print('List of hits fo forward end reads exported') if not os.path.exists( os.path.join(", "forward end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end", "project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), 
project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id),", "sample_id, metric=metric) # Generate output for the project if sample_identifier is None: #", "'\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire", "== 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads:", "== 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if parser1.reads: parser1.export_hit_fastq() print('Hits for forward end", "parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) )", "import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline, report_results from", "line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end) line_counter = 0 read_count2 = 0 base_count2", "print('Krona chart for reverse end reads created') result[ENDS[1]] = {read_id: read for (read_id,", "end reads exported in FASTQ format') parser2.export_hit_list() print('List of hits for reverse end", "'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command):", "parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences of selected reads and", "read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end", "print('Pre-selection 
search did not find any hits for forward end reads') result[ENDS[0]] =", "print('Hits for forward end reads exported in FASTQ format') parser1.export_hit_list() print('List of hits", "save current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional profiling pipeline for", "open(fastq_file2, 'rb') for line in infile_handle: # count lines as each FASTQ entry", "if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line =", "if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads by default, but sequence", "elif line_counter == 2: base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence =", "args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path] else: args['seqfiles'] = [sample.fastq_fwd_path] args['verbose'] = True args['diamond'] =", "gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb') for line in infile_handle: # count", "print('Classified reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified", "format') parser2.export_hit_list() print('List of hits for reverse end reads exported') if not os.path.exists(", "sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args,", "= sample if end_identifier: project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads =", "read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read in parser2.reads: parser2.reads[current_read].read_id_line", "run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig): program 
configuration", "'--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_'", "if current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2 +=", "parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' +", "'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample):", "import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census", "line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in", "parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse end reads finished')", "FASTQ file processing Args: project (:obj:Project): current project sample (:obj:Sample): current sample \"\"\"", "of selected reads and stores them Returns: read_count (int): number of reads in", "= True args['diamond'] = config.diamond_path args['data_dir'] = config.microbecensus_datadir args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads']", "sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if end_identifier: project.samples[sample_id].reads[end_identifier] =", "# parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart',", "1: 
read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read", "'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND", "result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0]) parser2 =", "classification DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input sequence file", "os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not os.path.exists( os.path.join(", "# '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart',", "+ '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end", "parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile =", "optional): sample identifier end_identifier (str, optional): end identifier \"\"\" for sample_id in project.list_samples():", "project (:obj:Project): current project sample_identifier (str, optional): sample identifier end_identifier (str, optional): end", "end reads created') generate_pdf_report(parser1) print('PDF report for forward end reads created') make_functions_chart(parser1) print('Krona", "line_counter == 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1:", "current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3: if current_read in", "(parser1, parser2, 
read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads,", "+ parser.end + '_' + parser.options.background_output_name ), '--max-target-seqs', '100', '--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id)", "run_ref_search(parser, command): \"\"\"Runs pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an", "line_counter == 2: base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line", "end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads", "print('Text report for forward end reads created') generate_pdf_report(parser1) print('PDF report for forward end", "for forward end reads created') make_functions_chart(parser1) print('Krona chart for forward end reads created')", "parser1.reads: parser1.reads[current_read].line3 = line if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter", "fama.se_functional_pipeline import run_fastq_pipeline from fama.utils.utils import run_external_program from fama.project.sample import Sample from fama.diamond_parser.diamond_parser", "read) in parser2.reads.items() if read.status == STATUS_GOOD} else: # No hits found print('Pre-selection", "read_count2, base_count1, base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount", "\\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample or delete sample", "the file base_count (int): total number of bases in all reads \"\"\" fastq_file1", "report if the pipeline is running for only one sample project.generate_report() # Rename", "== STATUS_GOOD} else: # No hits found print('Pre-selection search did not find any", "str( 
parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), #", "classified reverse end reads exported') export_annotated_reads(parser2) print('Classified reverse end reads exported in JSON", "print('Krona chart for forward end reads created') result[ENDS[0]] = {read_id: read for (read_id,", "'fragmentcount' else: metric = 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0:", "in parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if current_read in parser1.reads:", "sample \"\"\" result = {} parser1 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[0])", "sample from memory project.options.set_sample_data(project.samples[sample_id]) metric = None for sample_id in project.list_samples(): if project.is_paired_end():", "created') generate_pdf_report(parser1) print('PDF report for forward end reads created') make_functions_chart(parser1) print('Krona chart for", "# Import sequence data for selected sequence reads print('Reading FASTQ file') (parser1, parser2,", "any hits for forward end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits", "print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser", "+ '_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads',", "sample_identifier and sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) 
project.samples[sample_id] = sample", "read for (read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD} else: # No", "in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1, parser2, read_count1, read_count2, base_count1, base_count2)", "end reads created') generate_pdf_report(parser2) print('PDF report for reverse end reads created') make_functions_chart(parser2) print('Krona", "result[ENDS[0]] = {read_id: read for (read_id, read) in parser1.reads.items() if read.status == STATUS_GOOD}", "for (read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD} else: # No hits", "project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' # Generate output for all samples for", "of reads in the file base_count (int): total number of bases in all", "parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2", "print('Classification DB search for reverse end reads finished') parser2.parse_background_output() print('Classification DB search results", "DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id", "sample_id in project.list_samples(): generate_sample_report(project, sample_id, metric=metric) # Generate output for the project if", "selected sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1, base_count2) =", "* parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt',", "as some reads are always discarded by filtering 
args['nreads'] = sample.fastq_fwd_readcount // 2", "Search in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' +", "\"\"\"Runs Fama functional profiling pipeline\"\"\" import os import gzip from fama.utils.const import ENDS,", "if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality =", "return (parser1, parser2, read_count1, read_count2, base_count1, base_count2) def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence", "\"\"\" For paired-end sequence reads, write paired-end reads for pre-selected reads into a", "# Rename existing project file and save current version project.save_project_options() return project def", "2: base_count2 += len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read", "report for forward end reads created') make_functions_chart(parser1) print('Krona chart for forward end reads", "+ parser2.options.background_output_name ) ): run_bgr_search(parser2, 'blastx') print('Classification DB search for reverse end reads", "and sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options) project.samples[sample_id] = sample if", "exported in JSON format') generate_fastq_report(parser1) print('Text report for forward end reads created') generate_pdf_report(parser1)", "find any hits for forward end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq()", "end reads exported in JSON format') generate_fastq_report(parser2) print('Text report for reverse end reads", "parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3 =", "= {read_id: read for (read_id, read) in parser1.reads.items() if read.status == STATUS_GOOD} else:", "'_' + parser.end + '_' + 
parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id)", "str(parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',", "separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids = {} for read_id in", "imported') parser1.export_read_fastq() print('Classified forward end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads", "Rename existing project file and save current version project.save_project_options() return project def run_pe_fastq_pipeline(project,", "sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config) sample.import_rpkg_scaling_factor() project.options.set_sample_data(sample) if", "run_bgr_search(parser, command): \"\"\"Runs classification DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an", "infile_handle: # count lines as each FASTQ entry has exactly four lines line_counter", "line_counter == 1: read_count2 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id", "reverse end reads exported in FASTQ format') parser2.export_hit_list() print('List of hits for reverse", "= line elif line_counter == 2: base_count1 += len(line) if current_read in parser1.reads:", "if current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1 +=", "parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported in FASTQ format') parser2.export_hit_list() print('List", "else: args['nreads'] = 2000000 print(args) est_ags, args = run_pipeline(args) report_results(args, est_ags, None) def", "generate_fastq_report(parser2) print('Text report for reverse end reads 
created') generate_pdf_report(parser2) print('PDF report for reverse", "+ parser2.end + '_' + parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output", "project file and save current version project.save_project_options() return project def run_pe_fastq_pipeline(project, sample): \"\"\"Functional", "parser1.reads[current_read].pe_quality = line if current_read in parser2.reads: parser2.reads[current_read].quality = line infile_handle.close() return (parser1,", "the pipeline is running for only one sample project.generate_report() # Rename existing project", "chart for forward end reads created') result[ENDS[0]] = {read_id: read for (read_id, read)", "print('Classified reverse end reads exported in JSON format') generate_fastq_report(parser2) print('Text report for reverse", "a separate FASTQ file \"\"\" outdir = parser.sample.work_directory read_ids = {} for read_id", "'_' + parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as", "import export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import parse_fastq_seqid def", "if fastq_file2.endswith('.gz'): infile_handle = gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for line", "if current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if current_read", "'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args)", "os import gzip from fama.utils.const import ENDS, STATUS_GOOD from fama.se_functional_pipeline import run_fastq_pipeline from", "'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_bgr_search(parser, command): \"\"\"Runs classification", "'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] 
run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config):", "if current_read in parser1.reads: parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3 =", "+= len(line) if current_read in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads:", "), '--query', parser.options.get_fastq_path(parser.sample.sample_id, parser.end), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end +", "parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified forward end reads exported in", "for reverse end reads created') result[ENDS[1]] = {read_id: read for (read_id, read) in", "elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read", "data for selected sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2, base_count1,", "= line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line", "in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3: if current_read in parser1.reads:", "0.0: metric = 'fragmentcount' else: metric = 'erpkg' for sample_id in project.list_samples(): if", "in project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue sample = Sample(sample_id) sample.load_sample(project.options)", "parser2.reads: parser2.reads[current_read].pe_sequence = line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3", "+ '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile: for read_id", "current_read in parser2.reads: parser2.reads[current_read].pe_line3 = line elif line_counter == 4: if current_read in", "elif 
sample.fastq_fwd_readcount < 3000000: args['nreads'] = sample.fastq_fwd_readcount - 1000000 else: args['nreads'] = 2000000", "file command (str): either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND')", "'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample,", "file \"\"\" outdir = parser.sample.work_directory read_ids = {} for read_id in sorted(parser.reads.keys()): read_ids[read_id]", "gzip.open(fastq_outfile, 'wt') as outfile: for read_id in sorted(parser.reads.keys()): outfile.write(parser.reads[read_id].pe_id + '\\n') outfile.write(parser.reads[read_id].pe_sequence +", "result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported in", "'blastp' (see DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db(", "base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line if current_read in", "args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt') args['threads'] = int(config.threads) args['no_equivs'] = True if sample.fastq_fwd_readcount <", ")), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen',", "== 4: if current_read in parser1.reads: parser1.reads[current_read].quality = line if current_read in parser2.reads:", "in parser1.reads: parser1.reads[current_read].pe_sequence = line if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif", "# Skip project report if the pipeline is running for only one sample", "end reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2)", "'efpkg' for sample_id in project.list_samples(): if 
project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'fragmentcount' else:", "def export_paired_end_reads_fastq(parser): \"\"\" For paired-end sequence reads, write paired-end reads for pre-selected reads", "sample.fastq_rev_basecount = base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample,", "from fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from fama.output.krona_xml_writer import make_functions_chart", "== 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1", "+= 1 if line_counter == 5: line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if", "{read_id: read for (read_id, read) in parser2.reads.items() if read.status == STATUS_GOOD} else: #", "'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor == 0.0: metric = 'readcount' #", "project.samples[sample_id].reads[end_identifier] = \\ run_fastq_pipeline(project, sample=project.samples[sample_id], end_id=end_identifier) else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id])", "ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id),", "parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )), # '--threads', # parser.config.threads, '--outfmt', '6', 'qseqid',", "exported') if not 
os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end + '_'", "print('Classification DB search finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified forward", "metric = None for sample_id in project.list_samples(): if project.is_paired_end(): metric = 'efpkg' for", "line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count2 += 1", "else: project.samples[sample_id].reads = \\ run_pe_fastq_pipeline(project, sample=project.samples[sample_id]) export_sample(project.samples[sample_id]) # Generate output for the sample", "line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line if", "line_counter = 0 read_count2 = 0 base_count2 = 0 current_read = None if", "= base_count2 if sample.rpkg_scaling_factor == 0.0: sample.import_rpkg_scaling_factor() if sample.rpkg_scaling_factor == 0.0: run_microbecensus(sample=sample, config=project.config)", "= line if current_read in parser2.reads: parser2.reads[current_read].pe_quality = line infile_handle.close() fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id,", "four lines line_counter += 1 if line_counter == 5: line_counter = 1 line", "make_functions_chart(parser1) print('Krona chart for forward end reads created') result[ENDS[0]] = {read_id: read for", "+= 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads:", "pipeline is running for only one sample project.generate_report() # Rename existing project file", "= 0 read_count2 = 0 base_count2 = 0 current_read = None if fastq_file2.endswith('.gz'):", "found print('Pre-selection search did not find any hits for forward end reads') result[ENDS[0]]", "FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse end reads exported') 
export_annotated_reads(parser2) print('Classified", "format') parser1.export_hit_list() print('List of hits fo forward end reads exported') if not os.path.exists(", "identifier \"\"\" for sample_id in project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue", "+ parser.end + '_' + parser.options.pe_reads_fastq_name + '.gz') with gzip.open(fastq_outfile, 'wt') as outfile:", "parser2.reads[current_read].line3 = line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality =", "+ '_' + parser.end + '_' + parser.options.ref_output_name ), '--max-target-seqs', '50', '--evalue', str(parser.config.get_evalue_cutoff(", "parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2 += len(line) if current_read in", "\"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 = 0 base_count1 =", "def main(): \"\"\"Main function\"\"\" print('This program is not intended to run directly.') if", "line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 += 1 (read_id, _) =", "end reads created') result[ENDS[1]] = {read_id: read for (read_id, read) in parser2.reads.items() if", "import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1 if sample.fastq_fwd_basecount", "line_counter == 2: base_count1 += len(line) if current_read in parser1.reads: parser1.reads[current_read].sequence = line", "DB search finished') parser1.parse_background_output() print('Classification DB search results imported') parser1.export_read_fastq() print('Classified forward end", "for reverse end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_'", "None if fastq_file1.endswith('.gz'): infile_handle = gzip.open(fastq_file1, 'rb') else: infile_handle = open(fastq_file1, 'rb') for", 
"line_counter == 1: read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id", "reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for classified reverse", "import_fastq_pe(parser1, parser2): \"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences of selected reads", "= open(fastq_file2, 'rb') for line in infile_handle: # count lines as each FASTQ", "reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported", "fo forward end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_'", "DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True) if not", "line_counter = 0 read_count1 = 0 base_count1 = 0 current_read = None infile_handle", "parser2.export_read_fastq() print('Classified reverse end reads exported in FASTQ format') export_paired_end_reads_fastq(parser2) print('Paired reads for", "in all reads \"\"\" fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end) line_counter = 0 read_count1 =", "pre-selection DIAMOND search Args: parser (:obj:DiamondParser): parser object processing an input sequence file", "current_read in parser2.reads: parser2.reads[current_read].pe_id = line elif line_counter == 2: base_count1 += len(line)", "'\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire project Args: project", "project (:obj:Project): current project sample (:obj:Sample): current sample \"\"\" result = {} parser1", "DB search for reverse end reads finished') parser2.parse_background_output() 
print('Classification DB search results for", "'_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification DB search finished') parser1.parse_background_output() print('Classification", "in FASTQ format') parser2.export_hit_list() print('List of hits for reverse end reads exported') if", "exist_ok=True) if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database if", "\"\"\"Reads uncompressed or gzipped FASTQ file, finds sequences of selected reads and stores", "project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id +", "line_counter = 1 line = line.decode('utf8').rstrip('\\n\\r') if line_counter == 1: read_count1 += 1", "parser1.reads[current_read].pe_line3 = line if current_read in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter ==", "= 'fragmentcount' else: metric = 'erpkg' for sample_id in project.list_samples(): if project.samples[sample_id].rpkg_scaling_factor ==", "parser2.options.ref_output_name ) ): run_ref_search(parser2, 'blastx') # Process output of reference DB search parser1.parse_reference_output()", "finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample (:obj:Sample): sample analyzed config (:obj:ProgramConfig):", "run_ref_search(parser2, 'blastx') # Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() # Import", "elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read", "{read_id: read for (read_id, read) in parser1.reads.items() if read.status == 
STATUS_GOOD} else: #", "DIAMOND manual) \"\"\" print('Starting DIAMOND') diamond_args = [parser.config.diamond_path, command, '--db', parser.config.get_reference_diamond_db( parser.options.get_collection(parser.sample.sample_id) ),", "end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id + '_' + parser1.end", "reads created') generate_pdf_report(parser2) print('PDF report for reverse end reads created') make_functions_chart(parser2) print('Krona chart", "+ '\\n') outfile.write(parser.reads[read_id].pe_sequence + '\\n') outfile.write(parser.reads[read_id].pe_line3 + '\\n') outfile.write(parser.reads[read_id].pe_quality + '\\n') def fastq_pe_pipeline(project,", "= {} if parser2.reads: parser2.export_hit_fastq() print('Hits for reverse end reads exported in FASTQ", "line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].line3 = line if current_read in", "project report if the pipeline is running for only one sample project.generate_report() #", "reads as some reads are always discarded by filtering args['nreads'] = sample.fastq_fwd_readcount //", "read_count1 if sample.fastq_fwd_basecount == 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount", "= [parser.config.diamond_path, command, '--db', parser.config.get_background_diamond_db( parser.options.get_collection(parser.sample.sample_id) ), '--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_'", "return result def main(): \"\"\"Main function\"\"\" print('This program is not intended to run", "hits for forward end reads') result[ENDS[0]] = {} if parser2.reads: parser2.export_hit_fastq() print('Hits for", "in sorted(parser.reads.keys()): read_ids[read_id] = read_id fastq_outfile = os.path.join(outdir, parser.sample.sample_id + '_' + parser.end", "print('Classification DB search results imported') 
parser1.export_read_fastq() print('Classified forward end reads exported in FASTQ", "parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].pe_id = line if current_read", "+ '_' + parser1.end + '_' + parser1.options.background_output_name ) ): run_bgr_search(parser1, 'blastx') print('Classification", "parser1.options.ref_output_name ) ): run_ref_search(parser1, 'blastx') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_'", "'6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']", "for sample_id in project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue sample =", "'--query', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_' + parser.end + '_' + parser.options.ref_hits_fastq_name ),", "from fama.output.json_util import export_annotated_reads, export_sample from fama.third_party.microbe_census import run_pipeline, report_results from fama.diamond_parser.hit_utils import", "forward end reads exported in JSON format') generate_fastq_report(parser1) print('Text report for forward end", "+ parser.end + '_' + parser.options.ref_hits_fastq_name ), '--out', os.path.join( parser.options.get_project_dir(parser.sample.sample_id), parser.sample.sample_id + '_'", "end reads exported in FASTQ format') export_paired_end_reads_fastq(parser1) print('Paired reads for classified forward end", "'blastx') print('Classification DB search for reverse end reads finished') parser2.parse_background_output() print('Classification DB search", "parser2.reads[current_read].sequence = line elif line_counter == 3: if current_read in parser1.reads: parser1.reads[current_read].pe_line3 =", "== 0: sample.fastq_fwd_basecount = base_count1 if sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if", 
"print('PDF report for reverse end reads created') make_functions_chart(parser2) print('Krona chart for reverse end", "end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id + '_' + parser2.end", "0.0: metric = 'readcount' # Generate output for all samples for sample_id in", "processing Args: project (:obj:Project): current project sample (:obj:Sample): current sample \"\"\" result =", "in parser2.reads: parser2.reads[current_read].line3 = line elif line_counter == 4: if current_read in parser1.reads:", "sequence data for selected sequence reads print('Reading FASTQ file') (parser1, parser2, read_count1, read_count2,", "for forward end reads created') result[ENDS[0]] = {read_id: read for (read_id, read) in", "\"\"\" for sample_id in project.list_samples(): if sample_identifier and sample_identifier != sample_id: continue sample", "hits fo forward end reads exported') if not os.path.exists( os.path.join( parser1.options.get_project_dir(parser1.sample.sample_id), parser1.sample.sample_id +", "search results for reverse end reads imported') parser2.export_read_fastq() print('Classified reverse end reads exported", "discarded by filtering args['nreads'] = sample.fastq_fwd_readcount // 2 elif sample.fastq_fwd_readcount < 3000000: args['nreads']", "'send', 'evalue', 'bitscore'] run_external_program(diamond_args) print('DIAMOND finished') def run_microbecensus(sample, config): \"\"\"Runs MicrobeCensus Args: sample", "sample.fastq_rev_readcount == 0: sample.fastq_rev_readcount = read_count2 if sample.fastq_rev_basecount == 0: sample.fastq_rev_basecount = base_count2", "base_count2) = import_fastq_pe( parser1, parser2 ) if sample.fastq_fwd_readcount == 0: sample.fastq_fwd_readcount = read_count1", "reads created') result[ENDS[1]] = {read_id: read for (read_id, read) in parser2.reads.items() if read.status", "FASTQ format') parser1.export_hit_list() print('List of hits fo forward end reads 
exported') if not", "'--evalue', str( parser.config.get_background_db_size( parser.options.get_collection(parser.sample.sample_id) ) * parser.config.get_evalue_cutoff( parser.options.get_collection(parser.sample.sample_id) ) / parser.config.get_reference_db_size( parser.options.get_collection(parser.sample.sample_id) )),", "if current_read in parser2.reads: parser2.reads[current_read].sequence = line elif line_counter == 3: if current_read", "not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))): os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id), project.options.get_output_subdir(sample.sample_id))) # Search in reference database if not os.path.exists(", "current_read in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2 += len(line)", "+ '\\n') def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None): \"\"\"Functional profiling pipeline for entire project Args:", "in parser2.reads: parser2.reads[current_read].read_id_line = line elif line_counter == 2: base_count2 += len(line) if", "): run_ref_search(parser2, 'blastx') # Process output of reference DB search parser1.parse_reference_output() parser2.parse_reference_output() #", "read_count1 += 1 (read_id, _) = parse_fastq_seqid(line) current_read = read_id if current_read in", "= True if sample.fastq_fwd_readcount < 1500000: # MicrobeCensus subsamples 2M reads by default,", "of hits for reverse end reads exported') if not os.path.exists( os.path.join( parser2.options.get_project_dir(parser2.sample.sample_id), parser2.sample.sample_id", "= line elif line_counter == 4: if current_read in parser1.reads: parser1.reads[current_read].pe_quality = line", "input sequence file command (str): either 'blastx' or 'blastp' (see DIAMOND manual) \"\"\"", "parser2 = DiamondParser(config=project.config, options=project.options, taxonomy_data=project.taxonomy_data, 
ref_data=project.ref_data, sample=sample, end=ENDS[1]) if not os.path.isdir(project.options.get_project_dir(sample.sample_id)): os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True)", "= parse_fastq_seqid(line) current_read = read_id if current_read in parser1.reads: parser1.reads[current_read].read_id_line = line if", "gzip.open(fastq_file2, 'rb') else: infile_handle = open(fastq_file2, 'rb') for line in infile_handle: # count", "fama.diamond_parser.diamond_parser import DiamondParser from fama.output.report import generate_fastq_report, generate_sample_report from fama.output.pdf_report import generate_pdf_report from" ]
[ "int: size = int((len(arr)) / 4) loose = max(1, size) for index in", "arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample 80 ms submission class Solution: def findSpecialInteger(self, arr:", "+ loose)) right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index +", "-> int: size = int((len(arr)) / 4) loose = max(1, size) for index", "arr: List[int]) -> int: for idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]:", "sample 76 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: for", "- loose), min(len(arr), index + loose)) if right - left > size: return", "- loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate, max(0, index -", "ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr))", "size) for index in range(0, len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr,", "class Solution: def findSpecialInteger(self, arr: List[int]) -> int: for idx, num in enumerate(arr):", "def findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr)) / 4) loose =", "loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr),", "candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index", "if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample 80 ms submission class Solution:", "= max(1, size) for index in range(0, len(arr), loose): candidate = arr[index] left", "candidate, max(0, index - loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate,", "in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num 
__________________________________________________________________________________________________ sample 80 ms submission", "<reponame>rakhi2001/ecom7 __________________________________________________________________________________________________ sample 76 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) ->", "4) loose = max(1, size) for index in range(0, len(arr), loose): candidate =", "submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: for idx, num in", "loose), min(len(arr), index + loose)) if right - left > size: return arr[index]", "loose)) right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose))", "index in range(0, len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0,", "arr[index] left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index + loose))", "bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose)) if right -", "range(0, len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0, index -", "/ 4) loose = max(1, size) for index in range(0, len(arr), loose): candidate", "min(len(arr), index + loose)) if right - left > size: return arr[index] assert(False)", "-> int: for idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num", "80 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: size =", "len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0, index - loose),", "return num __________________________________________________________________________________________________ sample 80 ms submission class Solution: def findSpecialInteger(self, arr: List[int])", "max(1, size) for index in range(0, len(arr), loose): candidate = arr[index] left =", "arr[idx] == arr[idx+len(arr)//4]: return num 
__________________________________________________________________________________________________ sample 80 ms submission class Solution: def", "enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample 80 ms submission class", "Solution: def findSpecialInteger(self, arr: List[int]) -> int: for idx, num in enumerate(arr): if", "findSpecialInteger(self, arr: List[int]) -> int: for idx, num in enumerate(arr): if arr[idx] ==", "right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose)) if", "def findSpecialInteger(self, arr: List[int]) -> int: for idx, num in enumerate(arr): if arr[idx]", "num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample 80 ms", "Solution: def findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr)) / 4) loose", "int: for idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________", "= arr[index] left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index +", "index - loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate, max(0, index", "__________________________________________________________________________________________________ sample 80 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int:", "List[int]) -> int: for idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return", "submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr)) /", "size = int((len(arr)) / 4) loose = max(1, size) for index in range(0,", "== arr[idx+len(arr)//4]: return num 
__________________________________________________________________________________________________ sample 80 ms submission class Solution: def findSpecialInteger(self,", "findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr)) / 4) loose = max(1,", "arr: List[int]) -> int: size = int((len(arr)) / 4) loose = max(1, size)", "num __________________________________________________________________________________________________ sample 80 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) ->", "in range(0, len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate, max(0, index", "index + loose)) right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index", "__________________________________________________________________________________________________ sample 76 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int:", "class Solution: def findSpecialInteger(self, arr: List[int]) -> int: size = int((len(arr)) / 4)", "= int((len(arr)) / 4) loose = max(1, size) for index in range(0, len(arr),", "max(0, index - loose), min(len(arr), index + loose)) if right - left >", "candidate, max(0, index - loose), min(len(arr), index + loose)) if right - left", "index - loose), min(len(arr), index + loose)) if right - left > size:", "loose = max(1, size) for index in range(0, len(arr), loose): candidate = arr[index]", "min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr),", "int((len(arr)) / 4) loose = max(1, size) for index in range(0, len(arr), loose):", "sample 80 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: size", "left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index + loose)) right", "for index in range(0, len(arr), loose): candidate = arr[index] left = bisect.bisect_left(arr, candidate,", "bisect.bisect_left(arr, candidate, max(0, index 
- loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr,", "idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample 80", "loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate, max(0, index - loose),", "= bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index + loose)) right =", "List[int]) -> int: size = int((len(arr)) / 4) loose = max(1, size) for", "max(0, index - loose), min(len(arr), index + loose)) right = bisect.bisect_right(arr, candidate, max(0,", "for idx, num in enumerate(arr): if arr[idx] == arr[idx+len(arr)//4]: return num __________________________________________________________________________________________________ sample", "= bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose)) if right", "76 ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: for idx,", "ms submission class Solution: def findSpecialInteger(self, arr: List[int]) -> int: for idx, num", "index + loose)) if right - left > size: return arr[index] assert(False) __________________________________________________________________________________________________" ]
[ "if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)] [print(i) for i", "3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)] [print(i) for", "if N==4:print(\"2 4 1 3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i", "if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)] [print(i) for i in range(2,N+1,2)]", "N=int(input()) if N==4:print(\"2 4 1 3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for", "1 3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)] [print(i)", "N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)] [print(i) for i in", "N==4:print(\"2 4 1 3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in", "4 1 3\");exit() if N==1:print(\"1\");exit() if N<4:print(\"NO SOLUTION\");exit() [print(i) for i in range(1,N+1,2)]" ]
[]
[ "if vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def", "None) # We dont care, just remove if there if len(task_data) > 1:", "governing permissions and # # limitations under the License. # ############################################################################## import json", "License is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR", "mac, vlan_list in iface_dict.items(): if vlan_list: return False return True def get_mappings(self, task_data):", "distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF", "understood by host task 'default': self.get_default_interface(task_data), # interface that should be def route", "be def route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for hostname,", "return ret def detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for mac, vlan_list", "for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list: return", "KIND, either express or implied. # # See the License for the specific", "express or implied. # # See the License for the specific language governing", "Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care, just", "} return ret def detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for mac,", "copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # #", "if vlan_list: return False return True def get_mappings(self, task_data): mappings = [] for", "mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping = mac", "language governing permissions and # # limitations under the License. 
# ############################################################################## import", "iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac if", "Unless required by applicable law or agreed to in writing, software # #", "in compliance with the License. # # You may obtain a copy of", "vlan_list: if vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings", "return False return True def get_mappings(self, task_data): mappings = [] for hostname, iface_dict", "mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self,", "task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac, vlan_list in", "# # you may not use this file except in compliance with the", "implied. # # See the License for the specific language governing permissions and", "'+'.join(self.get_mappings(task_data)), # mappings as understood by host task 'default': self.get_default_interface(task_data), # interface that", "def get_mappings(self, task_data): mappings = [] for hostname, iface_dict in task_data.items(): for mac,", "is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS", "class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care, just remove", "# We dont care, just remove if there if len(task_data) > 1: print(\"There", "in iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac", "vlan in vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']: default_interface", "not use this file except in compliance with the License. # # You", "# limitations under the License. 
# ############################################################################## import json from st2actions.runners.pythonrunner import Action", "under the Apache License, Version 2.0 (the \"License\"); # # you may not", "the License. # ############################################################################## import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def", "local=False) ) return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict", "'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task", "self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname,", "permissions and # # limitations under the License. # ############################################################################## import json from", "task_data.pop(\"lab_token\", None) # We dont care, just remove if there if len(task_data) >", "be one host here!\") return None ret = { 'host': list(task_data.keys())[0], # hostname", "get_mappings(self, task_data): mappings = [] for hostname, iface_dict in task_data.items(): for mac, vlan_list", "vlan_list in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping = mac +", "you may not use this file except in compliance with the License. #", "mappings = [] for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items():", "License. 
# # You may obtain a copy of the License at #", "= { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by", "law or agreed to in writing, software # # distributed under the License", "get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac, vlan_list", "and Others # # # # Licensed under the Apache License, Version 2.0", "return True def get_mappings(self, task_data): mappings = [] for hostname, iface_dict in task_data.items():", "2018 <NAME> and Others # # # # Licensed under the Apache License,", "interface that should be def route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self,", "for vlan in vlan_list: if vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id'])", "= mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list =", "ret = { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood", "import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care,", "= set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for", "distributed under the License is distributed on an \"AS IS\" BASIS, # #", "mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list", "# # See the License for the specific language governing permissions and #", "# # # Unless required by applicable law or agreed to in writing,", "just remove if there if len(task_data) > 1: print(\"There should only be one", "remove if there if len(task_data) > 1: print(\"There should only be one host", "iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping = mac + \"-\" +", "vlan_list in iface_dict.items(): 
if vlan_list: return False return True def get_mappings(self, task_data): mappings", "task 'default': self.get_default_interface(task_data), # interface that should be def route 'empty': self.detect_empty(task_data) }", "applicable law or agreed to in writing, software # # distributed under the", "= json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans())", "set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan", "agreed to in writing, software # # distributed under the License is distributed", "# interface that should be def route 'empty': self.detect_empty(task_data) } return ret def", ") return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in", "# # limitations under the License. # ############################################################################## import json from st2actions.runners.pythonrunner import", "'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task 'default': self.get_default_interface(task_data), # interface", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "+ \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\",", "vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data): default =", "for mac, vlan_list in iface_dict.items(): if vlan_list: return False return True def get_mappings(self,", "in default: default_interface = mac if vlan['tagged']: default_interface += \".\" + str(vlan['vlan_id']) return", "self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for", "software # # distributed under the License is distributed on an \"AS IS\"", "specific language governing permissions and # # limitations under the License. # ##############################################################################", "> 1: print(\"There should only be one host here!\") return None ret =", "under the License is distributed on an \"AS IS\" BASIS, # # WITHOUT", "host here!\") return None ret = { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)),", "for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in", "with the License. 
# # You may obtain a copy of the License", "# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "to in writing, software # # distributed under the License is distributed on", "'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for hostname, iface_dict in task_data.items():", "here!\") return None ret = { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), #", "\"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False)", "############################################################################## import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\",", "default: default_interface = mac if vlan['tagged']: default_interface += \".\" + str(vlan['vlan_id']) return default_interface", "You may obtain a copy of the License at # # # #", "should be def route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for", "Apache License, Version 2.0 (the \"License\"); # # you may not use this", "dont care, just remove if there if len(task_data) > 1: print(\"There should only", "(the \"License\"); # # you may not use this file except in compliance", "at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by", "{ 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host", "required by applicable law or agreed to in writing, software # # distributed", "vlan_list in iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id']) in default: default_interface =", "import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None)", 
"vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for", "2.0 (the \"License\"); # # you may not use this file except in", "if len(task_data) > 1: print(\"There should only be one host here!\") return None", "the License. # # You may obtain a copy of the License at", "in vlan_list: if vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return", "\"License\"); # # you may not use this file except in compliance with", "if int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']: default_interface += \".\" +", "\"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "[] for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan", "len(task_data) > 1: print(\"There should only be one host here!\") return None ret", "# # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law", "# you may not use this file except in compliance with the License.", "in writing, software # # distributed under the License is distributed on an", "may not use this file except in compliance with the License. # #", "hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list:", "on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "as understood by host task 'default': self.get_default_interface(task_data), # interface that should be def", "obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 #", "in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping = mac + \"-\"", "OF ANY KIND, either express or implied. 
# # See the License for", "None ret = { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as", "or agreed to in writing, software # # distributed under the License is", "CONDITIONS OF ANY KIND, either express or implied. # # See the License", "get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data): default", "host task 'default': self.get_default_interface(task_data), # interface that should be def route 'empty': self.detect_empty(task_data)", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # #", "# mappings as understood by host task 'default': self.get_default_interface(task_data), # interface that should", "in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id'])", "ANY KIND, either express or implied. # # See the License for the", "# # # # Licensed under the Apache License, Version 2.0 (the \"License\");", "# hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task 'default': self.get_default_interface(task_data),", "task_data): mappings = [] for hostname, iface_dict in task_data.items(): for mac, vlan_list in", "# # Unless required by applicable law or agreed to in writing, software", "hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task 'default': self.get_default_interface(task_data), #", "Version 2.0 (the \"License\"); # # you may not use this file except", "+ str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) )", "or implied. 
# # See the License for the specific language governing permissions", "for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping =", "vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']: default_interface += \".\"", "return None ret = { 'host': list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings", "# Unless required by applicable law or agreed to in writing, software #", "the License for the specific language governing permissions and # # limitations under", "care, just remove if there if len(task_data) > 1: print(\"There should only be", "this file except in compliance with the License. # # You may obtain", "# Copyright 2018 <NAME> and Others # # # # Licensed under the", "License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required", "use this file except in compliance with the License. # # You may", "# http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed", "that should be def route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data):", "iface_dict.items(): if vlan_list: return False return True def get_mappings(self, task_data): mappings = []", "self.get_default_interface(task_data), # interface that should be def route 'empty': self.detect_empty(task_data) } return ret", "def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care, just remove if there", "detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): if", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # #", "json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) #", "= [] for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for", "json.loads( 
self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for", "if there if len(task_data) > 1: print(\"There should only be one host here!\")", "list(task_data.keys())[0], # hostname 'mappings': '+'.join(self.get_mappings(task_data)), # mappings as understood by host task 'default':", "mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads(", "mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list", "one host here!\") return None ret = { 'host': list(task_data.keys())[0], # hostname 'mappings':", "of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # #", "True def get_mappings(self, task_data): mappings = [] for hostname, iface_dict in task_data.items(): for", "# # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or", "for the specific language governing permissions and # # limitations under the License.", "# # You may obtain a copy of the License at # #", "only be one host here!\") return None ret = { 'host': list(task_data.keys())[0], #", "except in compliance with the License. # # You may obtain a copy", "print(\"There should only be one host here!\") return None ret = { 'host':", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# # See", "task_data): for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list:", "the License is distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES", "the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless", "hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list: return False", "st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont", "ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care, just remove if", "in iface_dict.items(): if vlan_list: return False return True def get_mappings(self, task_data): mappings =", "License for the specific language governing permissions and # # limitations under the", "by host task 'default': self.get_default_interface(task_data), # interface that should be def route 'empty':", "file except in compliance with the License. # # You may obtain a", "'default': self.get_default_interface(task_data), # interface that should be def route 'empty': self.detect_empty(task_data) } return", "may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0", "iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list: return False return", "License, Version 2.0 (the \"License\"); # # you may not use this file", "OR CONDITIONS OF ANY KIND, either express or implied. 
# # See the", "We dont care, just remove if there if len(task_data) > 1: print(\"There should", "BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# ############################################################################## import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data):", "for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id']) in default:", "def detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items():", "int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']: default_interface += \".\" + str(vlan['vlan_id'])", "1: print(\"There should only be one host here!\") return None ret = {", "# See the License for the specific language governing permissions and # #", "License. # ############################################################################## import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self,", "See the License for the specific language governing permissions and # # limitations", "in vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']: default_interface +=", "def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac,", "vlan in vlan_list: if vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping)", "an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# # distributed under the License is distributed on an \"AS IS\" BASIS,", "limitations under the License. 
# ############################################################################## import json from st2actions.runners.pythonrunner import Action class", "a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "vlan['tagged']: mapping = mac + \"-\" + str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self):", "for vlan in vlan_list: if int(vlan['vlan_id']) in default: default_interface = mac if vlan['tagged']:", "run(self, task_data): task_data.pop(\"lab_token\", None) # We dont care, just remove if there if", "and # # limitations under the License. # ############################################################################## import json from st2actions.runners.pythonrunner", "task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if int(vlan['vlan_id']) in", "mappings as understood by host task 'default': self.get_default_interface(task_data), # interface that should be", "return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def", "Copyright 2018 <NAME> and Others # # # # Licensed under the Apache", "vlan_list: return False return True def get_mappings(self, task_data): mappings = [] for hostname,", "there if len(task_data) > 1: print(\"There should only be one host here!\") return", "in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']:", "return vlan_list def get_default_interface(self, task_data): default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items():", "task_data): task_data.pop(\"lab_token\", None) # We dont care, just remove if there if len(task_data)", "should only be one host here!\") return None ret = { 'host': list(task_data.keys())[0],", "mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if 
int(vlan['vlan_id']) in default: default_interface", "str(vlan['vlan_id']) mappings.append(mapping) return mappings def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return", "def route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for hostname, iface_dict", "ret def detect_empty(self, task_data): for hostname, iface_dict in task_data.items(): for mac, vlan_list in", "task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if vlan['tagged']: mapping", "Licensed under the Apache License, Version 2.0 (the \"License\"); # # you may", "############################################################################## # Copyright 2018 <NAME> and Others # # # # Licensed under", "IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list: return False return True def", "in task_data.items(): for mac, vlan_list in iface_dict.items(): if vlan_list: return False return True", "http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to", "from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action): def run(self, task_data): task_data.pop(\"lab_token\", None) # We", "default = set(self.get_default_vlans()) for hostname, iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items():", "either express or implied. 
# # See the License for the specific language", "writing, software # # distributed under the License is distributed on an \"AS", "iface_dict in task_data.items(): for mac, vlan_list in iface_dict.items(): for vlan in vlan_list: if", "the Apache License, Version 2.0 (the \"License\"); # # you may not use", "# # # # Unless required by applicable law or agreed to in", "route 'empty': self.detect_empty(task_data) } return ret def detect_empty(self, task_data): for hostname, iface_dict in", "the specific language governing permissions and # # limitations under the License. #", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # # you", "compliance with the License. # # You may obtain a copy of the", "# You may obtain a copy of the License at # # #", "by applicable law or agreed to in writing, software # # distributed under", "def get_default_vlans(self): vlan_list = json.loads( self.action_service.get_value(\"default_vlans\", local=False) ) return vlan_list def get_default_interface(self, task_data):", "# # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "False return True def get_mappings(self, task_data): mappings = [] for hostname, iface_dict in", "# # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable", "under the License. # ############################################################################## import json from st2actions.runners.pythonrunner import Action class ParseNetworkAction(Action):", "Others # # # # Licensed under the Apache License, Version 2.0 (the", "<NAME> and Others # # # # Licensed under the Apache License, Version" ]
[ "in queue] keys = [message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input')) for", "self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No predict function in model class'", "== 0: mq_miss = 0 if self.collection and model: model = None print('model", "model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0],", "and mq_miss % self.collection_limit == 0: mq_miss = 0 if self.collection and model:", "def run(self, pre_process=lambda x: x, post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss =", "pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if queue: mq_miss = 0 if", "queue: mq_miss = 0 if not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x)", "= pipe.execute() if queue: mq_miss = 0 if not model: model = self.model_class(**self.model_config)", "messages] model_inputs = [pre_process(message.get('model_input')) for message in messages] results = [post_process(x) for x", "mq_miss % self.collection_limit == 0: mq_miss = 0 if self.collection and model: model", "'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No", "model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x in queue] keys = [message.get('key')", "True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1)", "= [message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input')) for message in messages]", "self.collection = collection self.collection_limit = collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x:", "not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x in 
queue] keys", "self.model_sleep = model_sleep self.collection = collection self.collection_limit = collection_limit def run(self, pre_process=lambda x:", "= self.model_class(**self.model_config) mq_miss = 0 print('model init') while True: with self.db.pipeline() as pipe:", "redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x: {'host':", "mq_miss = 0 print('model init') while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0,", "self.collection_limit == 0: mq_miss = 0 if self.collection and model: model = None", "0: mq_miss = 0 if self.collection and model: model = None print('model is", "self.collection_limit = collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x: x): model =", "self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _", "= collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x: x): model = self.model_class(**self.model_config)", "in dir(model_class), 'No predict function in model class' self.model_class = model_class self.model_config =", "batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit = collection_limit def run(self, pre_process=lambda", "class' self.model_class = model_class self.model_config = model_config self.batch_size = batch_size self.model_sleep = model_sleep", "self.model_config = model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit", "= 0 if self.collection and model: model = None print('model is collected') time.sleep(self.model_sleep)", "[pre_process(message.get('model_input')) for message in messages] results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key:", "= 0 print('model init') while True: with self.db.pipeline() as pipe: 
pipe.lrange(self.redis_queue, 0, self.batch_size", "self.batch_size = batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit = collection_limit def", "if not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x in queue]", "x in queue] keys = [message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input'))", "collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker))", "with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue,", "redis class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False,", "redis_queue assert 'predict' in dir(model_class), 'No predict function in model class' self.model_class =", "batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])}", "= [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys,", "model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)}) else: mq_miss += 1", "import redis class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1,", "= [pickle.loads(x) for x in queue] keys = [message.get('key') for message in messages]", "def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse", "x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue 
= redis_queue assert 'predict' in dir(model_class),", "model_sleep self.collection = collection self.collection_limit = collection_limit def run(self, pre_process=lambda x: x, post_process=lambda", "as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ =", "keys = [message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input')) for message in", "1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if queue: mq_miss = 0", "class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000,", "{'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in", "+= 1 if mq_miss and mq_miss % self.collection_limit == 0: mq_miss = 0", "results)}) else: mq_miss += 1 if mq_miss and mq_miss % self.collection_limit == 0:", "else: mq_miss += 1 if mq_miss and mq_miss % self.collection_limit == 0: mq_miss", "= 0 if not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x", "self.model_class = model_class self.model_config = model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection", "messages] results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result", "self.model_class(**self.model_config) messages = [pickle.loads(x) for x in queue] keys = [message.get('key') for message", "post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss = 0 print('model init') while True:", "= self.model_class(**self.model_config) messages = [pickle.loads(x) for x in queue] keys = [message.get('key') for", "time import pickle import redis class ModelAgent: def __init__( self, 
redis_broker='localhost:6379', redis_queue='broker', model_class=None,", "while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size,", "= redis_queue assert 'predict' in dir(model_class), 'No predict function in model class' self.model_class", "run(self, pre_process=lambda x: x, post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss = 0", "model_class self.model_config = model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection = collection", "collection self.collection_limit = collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x: x): model", "for key, result in zip(keys, results)}) else: mq_miss += 1 if mq_miss and", "if queue: mq_miss = 0 if not model: model = self.model_class(**self.model_config) messages =", "[post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)})", "pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute()", "= [pre_process(message.get('model_input')) for message in messages] results = [post_process(x) for x in model.predict(model_inputs)]", "x: x): model = self.model_class(**self.model_config) mq_miss = 0 print('model init') while True: with", "results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in", "pipe.execute() if queue: mq_miss = 0 if not model: model = self.model_class(**self.model_config) messages", "mq_miss += 1 if mq_miss and mq_miss % self.collection_limit == 0: mq_miss =", "mq_miss = 0 if self.collection and model: model = None print('model is collected')", "lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert", "x: x, post_process=lambda x: 
x): model = self.model_class(**self.model_config) mq_miss = 0 print('model init')", "self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No predict function", "model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x in queue] keys =", "import pickle import redis class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={},", "model_inputs = [pre_process(message.get('model_input')) for message in messages] results = [post_process(x) for x in", "queue, _ = pipe.execute() if queue: mq_miss = 0 if not model: model", "pre_process=lambda x: x, post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss = 0 print('model", "model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit = collection_limit", "'predict' in dir(model_class), 'No predict function in model class' self.model_class = model_class self.model_config", "= collection self.collection_limit = collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x: x):", "x, post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss = 0 print('model init') while", "redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x:", "pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if", "_ = pipe.execute() if queue: mq_miss = 0 if not model: model =", "1 if mq_miss and mq_miss % self.collection_limit == 0: mq_miss = 0 if", "% self.collection_limit == 0: mq_miss = 0 if self.collection and model: model =", "in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)}) else: mq_miss +=", "init') 
while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1) pipe.ltrim(self.redis_queue,", "- 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if queue: mq_miss =", "zip(keys, results)}) else: mq_miss += 1 if mq_miss and mq_miss % self.collection_limit ==", "collection_limit def run(self, pre_process=lambda x: x, post_process=lambda x: x): model = self.model_class(**self.model_config) mq_miss", "0 if not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for x in", "function in model class' self.model_class = model_class self.model_config = model_config self.batch_size = batch_size", "= model_sleep self.collection = collection self.collection_limit = collection_limit def run(self, pre_process=lambda x: x,", "= redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No predict function in", "'No predict function in model class' self.model_class = model_class self.model_config = model_config self.batch_size", "messages = [pickle.loads(x) for x in queue] keys = [message.get('key') for message in", "in messages] results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key,", "= lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue", "parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue =", "predict function in model class' self.model_class = model_class self.model_config = model_config self.batch_size =", "message in messages] model_inputs = [pre_process(message.get('model_input')) for message in messages] results = [post_process(x)", "model class' self.model_class = model_class self.model_config = model_config self.batch_size = batch_size self.model_sleep =", "= model_class 
self.model_config = model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection =", "model = self.model_class(**self.model_config) mq_miss = 0 print('model init') while True: with self.db.pipeline() as", "self.model_class(**self.model_config) mq_miss = 0 print('model init') while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue,", "ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ):", "assert 'predict' in dir(model_class), 'No predict function in model class' self.model_class = model_class", "= batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit = collection_limit def run(self,", "mq_miss = 0 if not model: model = self.model_class(**self.model_config) messages = [pickle.loads(x) for", "result in zip(keys, results)}) else: mq_miss += 1 if mq_miss and mq_miss %", "message in messages] results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for", "): parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue", "in messages] model_inputs = [pre_process(message.get('model_input')) for message in messages] results = [post_process(x) for", "-1) queue, _ = pipe.execute() if queue: mq_miss = 0 if not model:", "self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if queue: mq_miss", "int(x.split(':')[1])} self.db = redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No predict", "import time import pickle import redis class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker',", "x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db = 
redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict'", "collection=False, collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db =", "[pickle.loads(x) for x in queue] keys = [message.get('key') for message in messages] model_inputs", "for x in queue] keys = [message.get('key') for message in messages] model_inputs =", "model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0], 'port': int(x.split(':')[1])} self.db", "redis.StrictRedis(**parse(redis_broker)) self.redis_queue = redis_queue assert 'predict' in dir(model_class), 'No predict function in model", "if mq_miss and mq_miss % self.collection_limit == 0: mq_miss = 0 if self.collection", "pickle.dumps(result) for key, result in zip(keys, results)}) else: mq_miss += 1 if mq_miss", "for message in messages] results = [post_process(x) for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result)", "queue] keys = [message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input')) for message", "[message.get('key') for message in messages] model_inputs = [pre_process(message.get('model_input')) for message in messages] results", "dir(model_class), 'No predict function in model class' self.model_class = model_class self.model_config = model_config", "0, self.batch_size - 1) pipe.ltrim(self.redis_queue, self.batch_size, -1) queue, _ = pipe.execute() if queue:", "for x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)}) else:", "x): model = self.model_class(**self.model_config) mq_miss = 0 print('model init') while True: with self.db.pipeline()", "self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda", "mq_miss and mq_miss % self.collection_limit == 
0: mq_miss = 0 if self.collection and", "model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse = lambda x: {'host': x.split(':')[0], 'port':", "in model class' self.model_class = model_class self.model_config = model_config self.batch_size = batch_size self.model_sleep", "0 print('model init') while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size -", "self.batch_size, -1) queue, _ = pipe.execute() if queue: mq_miss = 0 if not", "= model_config self.batch_size = batch_size self.model_sleep = model_sleep self.collection = collection self.collection_limit =", "key, result in zip(keys, results)}) else: mq_miss += 1 if mq_miss and mq_miss", "print('model init') while True: with self.db.pipeline() as pipe: pipe.lrange(self.redis_queue, 0, self.batch_size - 1)", "self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)}) else: mq_miss += 1 if", "pickle import redis class ModelAgent: def __init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64,", "for message in messages] model_inputs = [pre_process(message.get('model_input')) for message in messages] results =", "in zip(keys, results)}) else: mq_miss += 1 if mq_miss and mq_miss % self.collection_limit", "__init__( self, redis_broker='localhost:6379', redis_queue='broker', model_class=None, model_config={}, batch_size=64, model_sleep=0.1, collection=False, collection_limit=6000, ): parse =", "x in model.predict(model_inputs)] self.db.mset({key: pickle.dumps(result) for key, result in zip(keys, results)}) else: mq_miss" ]
[ "schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\") variables = {\"device\":", "GraphQL query from the ORM.\"\"\" import logging from django.utils.module_loading import import_string from graphene_django.settings", "data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data", "query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with", "if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data", "(400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\") result = document.execute(context_value=request, variable_values=variables)", "document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict()) data", "- request for `%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL -", "try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except return (400,", "import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__)", "graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\") variables = {\"device\": device} try:", "to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query))", "GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform", "= 
get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\") variables", "- execute query with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL -", "= data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try:", "execute GraphQL query from the ORM.\"\"\" import logging from django.utils.module_loading import import_string from", "variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return", "- query executed unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\")", "graph_ql_query(request, device, query): \"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request", "graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER =", "query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL -", "get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG", "with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\")", "test query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL", "test query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query", "tansform 
data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as", "LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except", "result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400,", "if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict()) data = result.data", "for `%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query", "result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"):", "is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function:", "= {\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document = backend.document_from_string(schema,", "ORM.\"\"\" import logging from django.utils.module_loading import import_string from graphene_django.settings import graphene_settings from graphql", "data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error:", "PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to run graphql and", "- test query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error:", "variable to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\",", "import 
import_string from graphene_django.settings import graphene_settings from graphql import get_default_backend from graphql.error import", "GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function", "error: # pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - request successful\") return", "graphene_django.settings import graphene_settings from graphql import get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models", "except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return (400,", "data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0]", "GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return (400, {\"error\":", "LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to run graphql and transposer", "transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend = get_default_backend() schema =", "device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except", "str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query", "backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query))", "result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings", "(400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") 
if global_settings.shorten_sot_query is True: data", "import graphene_settings from graphql import get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import", "except Exception as error: # pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL -", "data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except return (400, {\"error\":", "`%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable", "and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend = get_default_backend() schema", "django.utils.module_loading import import_string from graphene_django.settings import graphene_settings from graphql import get_default_backend from graphql.error", "= logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to run graphql and transposer command.\"\"\"", "get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\") variables =", "import get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import", "execute query with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query", "- tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception", "document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed:", "import_string from graphene_django.settings import graphene_settings from graphql import get_default_backend from graphql.error import GraphQLSyntaxError", "graphql import 
get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant", "`%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test", "query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return", "variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict()) data =", "as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)})", "= GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL -", "try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError", "# pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - request successful\") return (200,", "- set query variable to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL -", "from graphql import get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from", "to execute GraphQL query from the ORM.\"\"\" import logging from django.utils.module_loading import import_string", "PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data)", "import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query):", "str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - 
execute query with variables\") result =", "unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is", "= import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except return (400, {\"error\": str(error)})", "= backend.document_from_string(schema, query) except GraphQLSyntaxError as error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\",", "data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\")))", "Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\")", "import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to run graphql", "GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def", "query with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed", "query executed unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if", "str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except return", "query variable to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test query:", "request for `%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set", ".constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def 
graph_ql_query(request, device, query): \"\"\"Function to run", "\"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device))", "str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to", "result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data =", "`%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\") result", "device, query): \"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for", "from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER", "\"\"\"Example code to execute GraphQL query from the ORM.\"\"\" import logging from django.utils.module_loading", "if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data =", "command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA", "= graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\") variables = {\"device\": device}", "- test query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute", "LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL", "variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document =", "global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if 
PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL", "query from the ORM.\"\"\" import logging from django.utils.module_loading import import_string from graphene_django.settings import", "code to execute GraphQL query from the ORM.\"\"\" import logging from django.utils.module_loading import", "from django.utils.module_loading import import_string from graphene_django.settings import graphene_settings from graphql import get_default_backend from", "`%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except", "= result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if", "import logging from django.utils.module_loading import import_string from graphene_django.settings import graphene_settings from graphql import", "<filename>nautobot_golden_config/utilities/graphql.py<gh_stars>0 \"\"\"Example code to execute GraphQL query from the ORM.\"\"\" import logging from", "nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device,", "error: LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL", "function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint:", "def graph_ql_query(request, device, query): \"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL -", "graphene_settings from graphql import get_default_backend from graphql.error import GraphQLSyntaxError from nautobot_golden_config.models import GoldenConfigSettings", "logging from django.utils.module_loading import import_string from 
graphene_django.settings import graphene_settings from graphql import get_default_backend", "LOGGER.debug(\"GraphQL - set query variable to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL", "from nautobot_golden_config.models import GoldenConfigSettings from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request,", "logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL", "graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend = get_default_backend()", "LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document = backend.document_from_string(schema, query) except GraphQLSyntaxError as", "str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid:", "= document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict())", "True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with function: `%s`\",", "return (400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query is True:", "run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend =", "from .constant import PLUGIN_CFG LOGGER = logging.getLogger(__name__) def graph_ql_query(request, device, query): \"\"\"Function to", "query): \"\"\"Function to run graphql and transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\",", "device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document", "to run graphql and 
transposer command.\"\"\" LOGGER.debug(\"GraphQL - request for `%s`\", str(device)) backend", "executed unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings = GoldenConfigSettings.objects.get(id=\"aaaaaaaa-0000-0000-0000-000000000001\") if global_settings.shorten_sot_query", "LOGGER.warning(\"GraphQL - test query Failed: `%s`\", str(query)) return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL -", "global_settings.shorten_sot_query is True: data = data[\"devices\"][0] if PLUGIN_CFG.get(\"sot_agg_transposer\"): LOGGER.debug(\"GraphQL - tansform data with", "as error: # pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - request successful\")", "from the ORM.\"\"\" import logging from django.utils.module_loading import import_string from graphene_django.settings import graphene_settings", "pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - request successful\") return (200, data)", "the ORM.\"\"\" import logging from django.utils.module_loading import import_string from graphene_django.settings import graphene_settings from", "{\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\") result = document.execute(context_value=request, variable_values=variables) if", "Exception as error: # pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - request", "{\"device\": device} try: LOGGER.debug(\"GraphQL - test query: `%s`\", str(query)) document = backend.document_from_string(schema, query)", "set query variable to device.\") variables = {\"device\": device} try: LOGGER.debug(\"GraphQL - test", "LOGGER.warning(\"GraphQL - query executed unsuccessfully\") return (400, result.to_dict()) data = result.data global_settings =", "with function: `%s`\", str(PLUGIN_CFG.get(\"sot_agg_transposer\"))) try: data = import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: #", 
"return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL - execute query with variables\") result = document.execute(context_value=request,", "LOGGER.debug(\"GraphQL - execute query with variables\") result = document.execute(context_value=request, variable_values=variables) if result.invalid: LOGGER.warning(\"GraphQL", "import_string(PLUGIN_CFG.get(\"sot_agg_transposer\"))(data) except Exception as error: # pylint: disable=broad-except return (400, {\"error\": str(error)}) LOGGER.debug(\"GraphQL", "backend = get_default_backend() schema = graphene_settings.SCHEMA LOGGER.debug(\"GraphQL - set query variable to device.\")", "from graphene_django.settings import graphene_settings from graphql import get_default_backend from graphql.error import GraphQLSyntaxError from" ]
[ "n_output]) # Parameters learning_rate = 0.001 training_epochs = 10 batch_size = 100 display_step", "import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib',", "cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})", "layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = { 'input_r':", "'conv3' is %s\" % (conv3.shape,)) # Plot ! for i in range(3): plt.matshow(conv3[0,", "(conv2.shape,)) # Plot ! for i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray'))", "USED # In[3]: device_type = \"/gpu:1\" # # DEFINE CNN # In[4]: with", "of 'pool' is %s\" % (pool.shape,)) # Plot ! for i in range(3):", "python # coding: utf-8 # ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import", "Convolution filters # In[16]: # Let's see weight! wc1 = sess.run(weights['wc1']) print (\"Size", "optional n_input = 784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3,", "trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1,", "64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)),", "# Saver save_step = 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network", "conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]: # Let's see 'conv3'", ":, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # # Dense", "# LET'S SEE HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x,", "_dense, 'out': _out } return out print (\"CNN ready\") # # DEFINE COMPUTATIONAL", "'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense,", "0: epoch = 
training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # #", "In[8]: if do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print", "# # RESTORE # In[8]: if do_train == 0: epoch = training_epochs-1 saver.restore(sess,", "import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from", "= tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects", "print (\"Optimization Finished.\") # # RESTORE # In[8]: if do_train == 0: epoch", "cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool) # In[14]:", "batch_ys}) # Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch #", "feed_dict={x: testimg, y: testlabel}) print (\" Test accuracy: %.3f\" % (test_acc)) # Save", "tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) #", "(\"Size of 'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label", "(test_acc)) # Save Net if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" +", ":, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # # Conv2", "tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1'])", "cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # # Dense # In[15]: #", "! for i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th", "'pool' print (\"Size of 'pool' is %s\" % (pool.shape,)) # Plot ! 
for", "tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1: for epoch in range(training_epochs): avg_cost", "(input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label)) # Plot", "+ \"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]: # Let's", "\"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]: # Let's see", "y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr =", "= mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys})", "print (\"Size of 'conv3' is %s\" % (conv3.shape,)) # Plot ! for i", "# ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as np import", "28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME')", "! for i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th", "% save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # #", "'conv2' is %s\" % (conv2.shape,)) # Plot ! 
for i in range(3): plt.matshow(conv2[0,", "i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar()", "plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool) # In[14]: #", "GRAPH # In[5]: # tf Graph input x = tf.placeholder(tf.float32, [None, n_input]) y", "feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input #", "stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1':", "'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out } return", "tf.device(device_type): # <= This is optional _pred = conv_simple(x, weights, biases)['out'] cost =", "# Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1", "tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1", "Finished.\") # # RESTORE # In[8]: if do_train == 0: epoch = training_epochs-1", "# <= This is optional _pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred,", "see 'out' print (\"Size of 'out' is %s\" % (out.shape,)) # # Convolution", "'conv2' print (\"Size of 'conv2' is %s\" % (conv2.shape,)) # Plot ! for", "all batches for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training", "(ReLU) # In[13]: # Let's see 'conv3' print (\"Size of 'conv3' is %s\"", "save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # # RESTORE", "'pool' is %s\" % (pool.shape,)) # Plot ! 
for i in range(3): plt.matshow(pool[0,", "'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out } return out print", "# # DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph input x =", "out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]: # Let's", "optional _pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)", "# Dense # In[15]: # Let's see 'dense' print (\"Size of 'dense' is", "of 'out' is %s\" % (out.shape,)) # # Convolution filters # In[16]: #", "tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1))", "(\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph input", "# In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg", "Let's see 'out' print (\"Size of 'out' is %s\" % (out.shape,)) # #", "in range(training_epochs): avg_cost = 0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches", "In[1]: import numpy as np import tensorflow as tf import matplotlib.pyplot as plt", "coding: utf-8 # ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as", "# In[12]: # Let's see 'conv2' print (\"Size of 'conv2' is %s\" %", "plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # #", "0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr,", "range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() #", "Save Net if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print", "np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data", "Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average", "In[12]: # Let's see 'conv2' print (\"Size of 'conv2' is %s\" % (conv2.shape,))", "= tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver save_step = 1;", "'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out", "= tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init", "In[5]: # tf Graph input x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32,", "# # Conv1 (convolution) # In[11]: # Let's see 'conv1' print (\"Size of", "i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool) #", "OPTIMIZE # ## DO TRAIN OR NOT # In[6]: do_train = 1 sess", "'out' print (\"Size of 'out' is %s\" % (out.shape,)) # # Convolution filters", "# Plot ! 
for i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i)", "range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for", "shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1,", "1 # Functions! with tf.device(device_type): # <= This is optional _pred = conv_simple(x,", "numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist", "{ 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases", "data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost += sess.run(cost,", "accuracy: %.3f\" % (test_acc)) # Save Net if epoch % save_step == 0:", "see 'pool' print (\"Size of 'pool' is %s\" % (pool.shape,)) # Plot !", "= tf.initialize_all_variables() # Saver save_step = 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3)", "plt.show() # # Conv3 (ReLU) # In[13]: # Let's see 'conv3' print (\"Size", "% (wc1.shape,)) # Plot ! for i in range(3): plt.matshow(wc1[:, :, 0, i],", "DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph input x = tf.placeholder(tf.float32, [None,", "MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels", "In[11]: # Let's see 'conv1' print (\"Size of 'conv1' is %s\" % (conv1.shape,))", "% (out.shape,)) # # Convolution filters # In[16]: # Let's see weight! wc1", "for epoch in range(training_epochs): avg_cost = 0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over", "tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr", "batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost +=", "_b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1,", "## DO TRAIN OR NOT # In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))", "%.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\" Test", "i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar()", "tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver save_step = 1; savedir =", "feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs,", "In[13]: # Let's see 'conv3' print (\"Size of 'conv3' is %s\" % (conv3.shape,))", "range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv filter\") plt.colorbar() plt.show()", "# Loop over all batches for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size)", "Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']),", "plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]: #", "epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") #", "range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() #", "= { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool,", "range(total_batch): 
batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm, feed_dict={x:", "conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]: # Let's see 'conv2'", "trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print", "for i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\")", "This is optional n_input = 784 n_output = 10 weights = { 'wc1':", "Net if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization", "CNN WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r =", "input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST # In[2]: mnist", "# Input # In[10]: # Let's see 'input_r' print (\"Size of 'input_r' is", "sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'],", "%s\" % (out.shape,)) # # Convolution filters # In[16]: # Let's see weight!", "= sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense =", "= 10 batch_size = 100 display_step = 1 # Functions! 
with tf.device(device_type): #", "_pool, 'dense': _dense, 'out': _out } return out print (\"CNN ready\") # #", "print (\"Network Ready to Go!\") # # OPTIMIZE # ## DO TRAIN OR", "%.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print", "= 784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64],", "# # LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images", "+ str(epoch)) print (\"Optimization Finished.\") # # RESTORE # In[8]: if do_train ==", "print (\"NETWORK RESTORED\") # # LET'S SEE HOW CNN WORKS # In[9]: with", "_out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = { 'input_r': _input_r,", "Plot ! for i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) +", "n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1':", "saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # # OPTIMIZE # ##", ":, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias)", "1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3", "Dense # In[15]: # Let's see 'dense' print (\"Size of 'dense' is %s\"", "label = np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label)) # Plot !", "(\"MNIST ready\") # # SELECT DEVICE TO BE USED # In[3]: device_type =", "Return everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3", "Go!\") # # OPTIMIZE # ## DO TRAIN OR NOT # In[6]: do_train", "[None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001 training_epochs", "feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x:", ":, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th 
conv2\") plt.colorbar() plt.show() # # Conv3", "SELECT DEVICE TO BE USED # In[3]: device_type = \"/gpu:1\" # # DEFINE", "Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2,", "(\"Optimization Finished.\") # # RESTORE # In[8]: if do_train == 0: epoch =", "corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver save_step", "0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" + str(label) + \"\") plt.colorbar()", "do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1:", "784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)),", "# In[16]: # Let's see weight! wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1'", "device_type = \"/gpu:1\" # # DEFINE CNN # In[4]: with tf.device(device_type): # <=", "(\"NETWORK RESTORED\") # # LET'S SEE HOW CNN WORKS # In[9]: with tf.device(device_type):", "plt.colorbar() plt.show() # # Pool (max_pool) # In[14]: # Let's see 'pool' print", "Let's see weight! wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" %", "ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1],", "_pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr", "Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = {", "is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is %d\" %", "in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv filter\") plt.colorbar()", "= 0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in", ":, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU)", ":]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]})", "%s\" % (conv3.shape,)) # Plot ! for i in range(3): plt.matshow(conv3[0, :, :,", "display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc", "TRAIN OR NOT # In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) #", "# Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) #", "3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1':", "optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr,", "loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per epoch", "# Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out =", "<= This is optional n_input = 784 n_output = 10 weights = {", "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count", "input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r,", "cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]:", "_conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 =", "% (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\" Test accuracy:", "# Vectorize 
_dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense,", "weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1,", "\"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool) # In[14]: # Let's see", "DEVICE TO BE USED # In[3]: device_type = \"/gpu:1\" # # DEFINE CNN", "= tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1,", "np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label)) # Plot ! plt.matshow(input_r[0, :,", "see 'conv1' print (\"Size of 'conv1' is %s\" % (conv1.shape,)) # Plot !", "In[16]: # Let's see weight! wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is", "plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]: #", "%s\" % (pool.shape,)) # Plot ! for i in range(3): plt.matshow(pool[0, :, :,", "testlabel}) print (\" Test accuracy: %.3f\" % (test_acc)) # Save Net if epoch", "(\"Network Ready to Go!\") # # OPTIMIZE # ## DO TRAIN OR NOT", "Parameters learning_rate = 0.001 training_epochs = 10 batch_size = 100 display_step = 1", "# Plot ! 
for i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i)", "# SELECT DEVICE TO BE USED # In[3]: device_type = \"/gpu:1\" # #", "input x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters", "epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # # LET'S", "\"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE HOW CNN WORKS", "HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r", "conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1])", "# Accuracy init = tf.initialize_all_variables() # Saver save_step = 1; savedir = \"nets/\"", "print (\"Size of 'out' is %s\" % (out.shape,)) # # Convolution filters #", "Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 =", "of 'dense' is %s\" % (dense.shape,)) # Let's see 'out' print (\"Size of", "input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2", "# Plot ! plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is", "padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2)", "= tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = { 'input_r': _input_r, 'conv1':", "sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per epoch step if epoch", "logs per epoch step if epoch % display_step == 0: print (\"Epoch: %03d/%03d", "'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is %d\"", "# Functions! with tf.device(device_type): # <= This is optional _pred = conv_simple(x, weights,", "% (conv2.shape,)) # Plot ! 
for i in range(3): plt.matshow(conv2[0, :, :, i],", "filters # In[16]: # Let's see weight! wc1 = sess.run(weights['wc1']) print (\"Size of", "out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool':", "in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm,", "to Go!\") # # OPTIMIZE # ## DO TRAIN OR NOT # In[6]:", "== 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # # RESTORE #", "(\"Size of 'conv1' is %s\" % (conv1.shape,)) # Plot ! for i in", "cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" + str(label) + \"\") plt.colorbar() plt.show()", "see 'input_r' print (\"Size of 'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0,", "stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1,", "! for i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th", "tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001", "= mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST", "# In[14]: # Let's see 'pool' print (\"Size of 'pool' is %s\" %", "plt.colorbar() plt.show() # # Dense # In[15]: # Let's see 'dense' print (\"Size", "print (\" Test accuracy: %.3f\" % (test_acc)) # Save Net if epoch %", "biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) #", "epoch in range(training_epochs): avg_cost = 0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over all", ":, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # # Pool", "In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg =", "COMPUTATIONAL GRAPH # In[5]: # tf Graph input x = tf.placeholder(tf.float32, [None, n_input])", "_w['wd1']), _b['bd1']) # Return everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2':", "tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = {", "<= This is optional _pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y))", "i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data", "# In[1]: import numpy as np import tensorflow as tf import matplotlib.pyplot as", "SEE HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases)", "using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost", "import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\")", ":]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]})", "Conv2 (+bias) # In[12]: # Let's see 'conv2' print (\"Size of 'conv2' is", "(\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs,", "In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1,", "# In[5]: # tf Graph input x = tf.placeholder(tf.float32, [None, n_input]) y =", "= sess.run(accr, feed_dict={x: 
testimg, y: testlabel}) print (\" Test accuracy: %.3f\" % (test_acc))", "plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" + str(label)", "(+bias) # In[12]: # Let's see 'conv2' print (\"Size of 'conv2' is %s\"", "_conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out }", "This is optional _pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm", "of 'conv2' is %s\" % (conv2.shape,)) # Plot ! for i in range(3):", "+ \"\") plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]: # Let's see", "tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out = { 'input_r': _input_r, 'conv1': _conv1,", "\" + str(label) + \"\") plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]:", ":, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # # Dense #", "Let's see 'conv3' print (\"Size of 'conv3' is %s\" % (conv3.shape,)) # Plot", "print (\"Size of 'wc1' is %s\" % (wc1.shape,)) # Plot ! for i", "# DEFINE CNN # In[4]: with tf.device(device_type): # <= This is optional n_input", "tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2,", "Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print", "ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense =", "= 1 # Functions! with tf.device(device_type): # <= This is optional _pred =", "# In[3]: device_type = \"/gpu:1\" # # DEFINE CNN # In[4]: with tf.device(device_type):", "plt.title(\"Label of this image is \" + str(label) + \"\") plt.colorbar() plt.show() #", "def conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28,", "weight! 
wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" % (wc1.shape,)) #", "conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 =", "{ 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): #", "_conv3 , 'pool': _pool, 'dense': _dense, 'out': _out } return out print (\"CNN", "Input # In[10]: # Let's see 'input_r' print (\"Size of 'input_r' is %s\"", "_w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything out", "batch_ys})/total_batch # Display logs per epoch step if epoch % display_step == 0:", "LET'S SEE HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights,", "step if epoch % display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\" %", "CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as np import tensorflow as tf", "do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\")", "sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'],", "\"\") plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]: # Let's see 'conv1'", "# Let's see 'pool' print (\"Size of 'pool' is %s\" % (pool.shape,)) #", "is %s\" % (pool.shape,)) # Plot ! 
for i in range(3): plt.matshow(pool[0, :,", "% (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label)) #", "i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar()", "if epoch % display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch,", "plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # #", "is \" + str(label) + \"\") plt.colorbar() plt.show() # # Conv1 (convolution) #", "n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) }", "one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels", "tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input", "feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x:", "{ 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense':", "(dense.shape,)) # Let's see 'out' print (\"Size of 'out' is %s\" % (out.shape,))", "weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1))", "'conv1' is %s\" % (conv1.shape,)) # Plot ! 
for i in range(3): plt.matshow(conv1[0,", "sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss avg_cost += sess.run(cost, feed_dict={x:", "# Save Net if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch))", "sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'],", "sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input", "\"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # # OPTIMIZE #", "i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar()", "% display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost))", "# Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute", "see 'conv3' print (\"Size of 'conv3' is %s\" % (conv3.shape,)) # Plot !", "for i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\")", "weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1))", "i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv filter\")", "1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu", "save_step = 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to", "== 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") #", "tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001 training_epochs = 10 batch_size =", "feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) 
conv2 = sess.run(conv_out['conv2'], feed_dict={x:", "tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init =", "Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs", "# Let's see 'conv2' print (\"Size of 'conv2' is %s\" % (conv2.shape,)) #", "n_input = 784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1,", "= training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE", "Plot ! for i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) +", "'out': _out } return out print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH", "learning_rate = 0.001 training_epochs = 10 batch_size = 100 display_step = 1 #", "see weight! wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" % (wc1.shape,))", "# RESTORE # In[8]: if do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\"", "tf.initialize_all_variables() # Saver save_step = 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print", "1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') #", "for i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\")", "Display logs per epoch step if epoch % display_step == 0: print (\"Epoch:", "dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) #", "= tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1,", "batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr,", "Conv1 (convolution) # In[11]: # Let's see 'conv1' print (\"Size of 'conv1' is", "\"th pool\") plt.colorbar() plt.show() # # Dense # In[15]: # 
Let's see 'dense'", "== 1: for epoch in range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) #", "tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense", "= tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1: for epoch in range(training_epochs):", "average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per", "avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per epoch step", "= 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1: for", "for i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\")", "Let's see 'conv2' print (\"Size of 'conv2' is %s\" % (conv2.shape,)) # Plot", "'dense': _dense, 'out': _out } return out print (\"CNN ready\") # # DEFINE", "(\"PACKAGES LOADED\") # # LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg", "In[3]: device_type = \"/gpu:1\" # # DEFINE CNN # In[4]: with tf.device(device_type): #", "} biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input,", "(out.shape,)) # # Convolution filters # In[16]: # Let's see weight! wc1 =", "(\"Label is %d\" % (label)) # Plot ! 
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray'))", "with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]})", "total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_xs,", "accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\"", "0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # # RESTORE # In[8]:", "batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y:", "1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1: for epoch", "i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # # Dense # In[15]:", "as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print", "print (\"Size of 'conv2' is %s\" % (conv2.shape,)) # Plot ! 
for i", "range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() #", "== 0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc =", "cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]:", "In[15]: # Let's see 'dense' print (\"Size of 'dense' is %s\" % (dense.shape,))", "feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per epoch step if epoch %", "in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show()", "is %s\" % (out.shape,)) # # Convolution filters # In[16]: # Let's see", "'dense' print (\"Size of 'dense' is %s\" % (dense.shape,)) # Let's see 'out'", "conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x:", "# Display logs per epoch step if epoch % display_step == 0: print", "Pool (max_pool) # In[14]: # Let's see 'pool' print (\"Size of 'pool' is", "'conv1' print (\"Size of 'conv1' is %s\" % (conv1.shape,)) # Plot ! for", "1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out", "of 'conv3' is %s\" % (conv3.shape,)) # Plot ! 
for i in range(3):", "LOADED\") # # LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg =", "# # LET'S SEE HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out =", "Saver save_step = 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready", "# tf Graph input x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None,", "# In[4]: with tf.device(device_type): # <= This is optional n_input = 784 n_output", "feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x:", "# Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],", ", 'pool': _pool, 'dense': _dense, 'out': _out } return out print (\"CNN ready\")", "'dense' is %s\" % (dense.shape,)) # Let's see 'out' print (\"Size of 'out'", "pool\") plt.colorbar() plt.show() # # Dense # In[15]: # Let's see 'dense' print", "if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\")", "plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]: # Let's see 'conv3' print", "= { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) }", "x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate", "# Plot ! 
for i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i)", "epoch % display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs,", "# Conv3 (ReLU) # In[13]: # Let's see 'conv3' print (\"Size of 'conv3'", "'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input _input_r =", "tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3,", "# Pool (max_pool) # In[14]: # Let's see 'pool' print (\"Size of 'pool'", "savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # #", "of 'conv1' is %s\" % (conv1.shape,)) # Plot ! for i in range(3):", "# # Pool (max_pool) # In[14]: # Let's see 'pool' print (\"Size of", "NETWORK # In[1]: import numpy as np import tensorflow as tf import matplotlib.pyplot", "display_step = 1 # Functions! with tf.device(device_type): # <= This is optional _pred", "is %s\" % (conv2.shape,)) # Plot ! for i in range(3): plt.matshow(conv2[0, :,", "mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\")", "image is \" + str(label) + \"\") plt.colorbar() plt.show() # # Conv1 (convolution)", "(wc1.shape,)) # Plot ! for i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray'))", "str(label) + \"\") plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]: # Let's", "= \"/gpu:1\" # # DEFINE CNN # In[4]: with tf.device(device_type): # <= This", "print (\"MNIST ready\") # # SELECT DEVICE TO BE USED # In[3]: device_type", "batch_xs, y: batch_ys}) # Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y:", "sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" % (wc1.shape,)) # Plot ! 
for", "= 0.001 training_epochs = 10 batch_size = 100 display_step = 1 # Functions!", "of 'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is", "= conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr =", "%s\" % (conv1.shape,)) # Plot ! for i in range(3): plt.matshow(conv1[0, :, :,", "in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show()", "trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1,", "%s\" % (dense.shape,)) # Let's see 'out' print (\"Size of 'out' is %s\"", "% (conv3.shape,)) # Plot ! for i in range(3): plt.matshow(conv3[0, :, :, i],", "1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 =", "_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy", "print (\"Size of 'conv1' is %s\" % (conv1.shape,)) # Plot ! for i", "= np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label)) # Plot ! plt.matshow(input_r[0,", "10 batch_size = 100 display_step = 1 # Functions! with tf.device(device_type): # <=", "tf Graph input x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output])", "Loop over all batches for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) #", "is %s\" % (conv1.shape,)) # Plot ! for i in range(3): plt.matshow(conv1[0, :,", "(\"Size of 'conv3' is %s\" % (conv3.shape,)) # Plot ! 
for i in", "y: testlabel}) print (\" Test accuracy: %.3f\" % (test_acc)) # Save Net if", "plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # #", "10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output],", "# Let's see 'input_r' print (\"Size of 'input_r' is %s\" % (input_r.shape,)) label", "sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'],", "% (label)) # Plot ! plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this", "! plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" +", "'out' is %s\" % (out.shape,)) # # Convolution filters # In[16]: # Let's", "# In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train", "plt.title(str(i) + \"th pool\") plt.colorbar() plt.show() # # Dense # In[15]: # Let's", "tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input,", "= int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_xs, batch_ys", "mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE TO BE USED # In[3]:", "for i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv", "y: batch_ys}) # Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch", "= { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b):", "# Return everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3':", "# Let's see 'dense' print (\"Size of 'dense' is %s\" % 
(dense.shape,)) #", "_b['bd1']) # Return everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2,", "(label)) # Plot ! plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image", "over all batches for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit", "get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST # In[2]: mnist =", "trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1,", "OR NOT # In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]:", ":]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]: #", "'conv3' print (\"Size of 'conv3' is %s\" % (conv3.shape,)) # Plot ! for", "= sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out =", "# In[11]: # Let's see 'conv1' print (\"Size of 'conv1' is %s\" %", "accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver save_step =", "# In[15]: # Let's see 'dense' print (\"Size of 'dense' is %s\" %", "Let's see 'conv1' print (\"Size of 'conv1' is %s\" % (conv1.shape,)) # Plot", "# In[8]: if do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch))", "everything out = { 'input_r': _input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 ,", "conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool", "(train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\" Test accuracy: %.3f\"", "biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, 
:]})", "print (\"Epoch: %03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x:", "print (\"Size of 'pool' is %s\" % (pool.shape,)) # Plot ! for i", "trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") #", "ready\") # # SELECT DEVICE TO BE USED # In[3]: device_type = \"/gpu:1\"", "# In[7]: if do_train == 1: for epoch in range(training_epochs): avg_cost = 0.", "y: batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x:", "In[10]: # Let's see 'input_r' print (\"Size of 'input_r' is %s\" % (input_r.shape,))", "_conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1,", "conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3", "= tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001 training_epochs = 10 batch_size", "%s\" % (conv2.shape,)) # Plot ! for i in range(3): plt.matshow(conv2[0, :, :,", "Plot ! 
for i in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) +", "is %s\" % (dense.shape,)) # Let's see 'out' print (\"Size of 'out' is", "biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w,", "% (dense.shape,)) # Let's see 'out' print (\"Size of 'out' is %s\" %", "%.3f\" % (test_acc)) # Save Net if epoch % save_step == 0: saver.save(sess,", "plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]: # Let's see 'conv1' print", "_b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution", "# Let's see 'conv3' print (\"Size of 'conv3' is %s\" % (conv3.shape,)) #", "In[14]: # Let's see 'pool' print (\"Size of 'pool' is %s\" % (pool.shape,))", "mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") # # SELECT", "+ str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE HOW CNN WORKS #", "feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]: # Let's see 'input_r' print", "feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x:", "testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE", "In[4]: with tf.device(device_type): # <= This is optional n_input = 784 n_output =", "is %d\" % (label)) # Plot ! 
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label", "# OPTIMIZE # ## DO TRAIN OR NOT # In[6]: do_train = 1", "RESTORED\") # # LET'S SEE HOW CNN WORKS # In[9]: with tf.device(device_type): conv_out", "trainimg[0:1, :]}) # # Input # In[10]: # Let's see 'input_r' print (\"Size", "plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD", "tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables()", "# Compute average loss avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display", "(\"Size of 'dense' is %s\" % (dense.shape,)) # Let's see 'out' print (\"Size", "trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]:", "# # Dense # In[15]: # Let's see 'dense' print (\"Size of 'dense'", "1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]])", "(conv1.shape,)) # Plot ! 
for i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray'))", "feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc)) test_acc =", "test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\" Test accuracy: %.3f\" %", "is optional n_input = 784 n_output = 10 weights = { 'wc1': tf.Variable(tf.random_normal([3,", "sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (\" Test accuracy: %.3f\" % (test_acc)) #", "range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() #", "'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output],", "training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) # Compute average loss", "RESTORE # In[8]: if do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" +", "%03d/%03d cost: %.9f\" % (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y:", "of 'wc1' is %s\" % (wc1.shape,)) # Plot ! for i in range(3):", "\"/gpu:1\" # # DEFINE CNN # In[4]: with tf.device(device_type): # <= This is", "# <= This is optional n_input = 784 n_output = 10 weights =", "= input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel", "for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch", "Plot ! 
for i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) +", "(\"Size of 'out' is %s\" % (out.shape,)) # # Convolution filters # In[16]:", "per epoch step if epoch % display_step == 0: print (\"Epoch: %03d/%03d cost:", "print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph", "[None, n_output]) # Parameters learning_rate = 0.001 training_epochs = 10 batch_size = 100", "Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver", "sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'],", "_w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) #", "SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as np import tensorflow as", "strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass", "2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected", "training_epochs = 10 batch_size = 100 display_step = 1 # Functions! with tf.device(device_type):", "0.001 training_epochs = 10 batch_size = 100 display_step = 1 # Functions! with", "+ \"th pool\") plt.colorbar() plt.show() # # Dense # In[15]: # Let's see", "%s\" % (wc1.shape,)) # Plot ! for i in range(3): plt.matshow(wc1[:, :, 0,", "Plot ! 
plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \"", "tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return", "[-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) # Return everything", "= tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1)) # Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32))", "= tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1']) #", "train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\" %", "as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import", "(\" Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})", "\"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]: # Let's see", "NOT # In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if", "do_train == 1: for epoch in range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size)", "Test accuracy: %.3f\" % (test_acc)) # Save Net if epoch % save_step ==", "with tf.device(device_type): # <= This is optional n_input = 784 n_output = 10", "tf.device(device_type): # <= This is optional n_input = 784 n_output = 10 weights", "Let's see 'pool' print (\"Size of 'pool' is %s\" % (pool.shape,)) # Plot", "(conv3.shape,)) # Plot ! for i in range(3): plt.matshow(conv3[0, :, :, i], cmap=plt.get_cmap('gray'))", "0. 
total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch):", "testimg, y: testlabel}) print (\" Test accuracy: %.3f\" % (test_acc)) # Save Net", "tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST #", "conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) _corr = tf.equal(tf.argmax(_pred,1),", "#!/usr/bin/env python # coding: utf-8 # ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]:", "_conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool", ":]}) # # Input # In[10]: # Let's see 'input_r' print (\"Size of", "(\"Size of 'pool' is %s\" % (pool.shape,)) # Plot ! for i in", "} def conv_simple(_input, _w, _b): # Reshape input _input_r = tf.reshape(_input, shape=[-1, 28,", "= \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # # OPTIMIZE", "# # OPTIMIZE # ## DO TRAIN OR NOT # In[6]: do_train =", "# Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2,", "# ## DO TRAIN OR NOT # In[6]: do_train = 1 sess =", "tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # # OPTIMIZE # ## DO TRAIN", "# # Input # In[10]: # Let's see 'input_r' print (\"Size of 'input_r'", ":, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" + str(label) +", "_conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out': _out } return out", "# # SELECT DEVICE TO BE USED # In[3]: device_type = \"/gpu:1\" #", "strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) #", "LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel =", "y = 
tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001 training_epochs = 10", "= mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE TO BE USED #", ":]) print (\"Label is %d\" % (label)) # Plot ! plt.matshow(input_r[0, :, :,", "(\" Test accuracy: %.3f\" % (test_acc)) # Save Net if epoch % save_step", "# Convolution filters # In[16]: # Let's see weight! wc1 = sess.run(weights['wc1']) print", "= mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE TO", "Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2", "Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm, feed_dict={x: batch_xs,", "# # DEFINE CNN # In[4]: with tf.device(device_type): # <= This is optional", "%d\" % (label)) # Plot ! plt.matshow(input_r[0, :, :, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of", "plt.show() # # Pool (max_pool) # In[14]: # Let's see 'pool' print (\"Size", "'pool': _pool, 'dense': _dense, 'out': _out } return out print (\"CNN ready\") #", "stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def", "(convolution) # In[11]: # Let's see 'conv1' print (\"Size of 'conv1' is %s\"", "avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i", "% (pool.shape,)) # Plot ! 
for i in range(3): plt.matshow(pool[0, :, :, i],", "+= sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch # Display logs per epoch step if", "int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(total_batch): batch_xs, batch_ys =", "_dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out = tf.add(tf.matmul(_dense, _w['wd1']), _b['bd1'])", ":, 0], cmap=plt.get_cmap('gray')) plt.title(\"Label of this image is \" + str(label) + \"\")", "stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape input _input_r", "mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE TO BE", "print (\" Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg, y:", "# In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x:", "init = tf.initialize_all_variables() # Saver save_step = 1; savedir = \"nets/\" saver =", "BE USED # In[3]: device_type = \"/gpu:1\" # # DEFINE CNN # In[4]:", "conv3\") plt.colorbar() plt.show() # # Pool (max_pool) # In[14]: # Let's see 'pool'", "# Parameters learning_rate = 0.001 training_epochs = 10 batch_size = 100 display_step =", "(max_pool) # In[14]: # Let's see 'pool' print (\"Size of 'pool' is %s\"", ":]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]})", "# coding: utf-8 # ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy", "'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases =", "% (epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\"", "In[7]: if do_train == 1: for epoch in range(training_epochs): avg_cost = 0. 
total_batch", "2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1,", "DO TRAIN OR NOT # In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init)", "training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE HOW", "1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64, n_output], stddev=0.1)) } biases = { 'bc1': tf.Variable(tf.random_normal([64],", "TO BE USED # In[3]: device_type = \"/gpu:1\" # # DEFINE CNN #", "# In[10]: # Let's see 'input_r' print (\"Size of 'input_r' is %s\" %", "Let's see 'input_r' print (\"Size of 'input_r' is %s\" % (input_r.shape,)) label =", "see 'conv2' print (\"Size of 'conv2' is %s\" % (conv2.shape,)) # Plot !", "mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optm, feed_dict={x: batch_xs, y: batch_ys}) #", "= tf.nn.relu(_conv2) # Max-pooling _pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2,", "batch_xs, y: batch_ys})/total_batch # Display logs per epoch step if epoch % display_step", "# Plot ! for i in range(3): plt.matshow(wc1[:, :, 0, i], cmap=plt.get_cmap('gray')) plt.title(str(i)", "input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel =", "= sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc))", "DEFINE CNN # In[4]: with tf.device(device_type): # <= This is optional n_input =", "# In[13]: # Let's see 'conv3' print (\"Size of 'conv3' is %s\" %", "(pool.shape,)) # Plot ! for i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray'))", "# # Convolution filters # In[16]: # Let's see weight! 
wc1 = sess.run(weights['wc1'])", "out print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]: # tf", "NEURAL NETWORK # In[1]: import numpy as np import tensorflow as tf import", "# LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel", "avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\"", "see 'dense' print (\"Size of 'dense' is %s\" % (dense.shape,)) # Let's see", "= 100 display_step = 1 # Functions! with tf.device(device_type): # <= This is", "'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/',", "28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1],", "ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph input x", "i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) #", "# Count corrects accr = tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() #", "# Conv1 (convolution) # In[11]: # Let's see 'conv1' print (\"Size of 'conv1'", "mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images", ":, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool)", "_pool = tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') #", "Graph input x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) #", "# Let's see weight! 
wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\"", "wc1 = sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" % (wc1.shape,)) # Plot", "plt.colorbar() plt.show() # # Conv2 (+bias) # In[12]: # Let's see 'conv2' print", "plt.show() # # Conv1 (convolution) # In[11]: # Let's see 'conv1' print (\"Size", "Accuracy init = tf.initialize_all_variables() # Saver save_step = 1; savedir = \"nets/\" saver", "utf-8 # ## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as np", "%s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print (\"Label is %d\" % (label))", "Let's see 'dense' print (\"Size of 'dense' is %s\" % (dense.shape,)) # Let's", "! for i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th", "= sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool =", "_out } return out print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH #", "batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc)) test_acc = sess.run(accr, feed_dict={x: testimg,", "as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # #", "_w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) #", "= tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") # # OPTIMIZE # ## DO", "# DEFINE COMPUTATIONAL GRAPH # In[5]: # tf Graph input x = tf.placeholder(tf.float32,", "epoch step if epoch % display_step == 0: print (\"Epoch: %03d/%03d cost: %.9f\"", ":]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]})", "= sess.run(weights['wc1']) print (\"Size of 'wc1' is %s\" % (wc1.shape,)) # Plot !", "2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool,", "batches for i in range(total_batch): batch_xs, 
batch_ys = mnist.train.next_batch(batch_size) # Fit training using", "= sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]: # Let's see", "% (test_acc)) # Save Net if epoch % save_step == 0: saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\"", "pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense = sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out", "Add-bias _conv2 = tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling", "str(epoch)) print (\"Optimization Finished.\") # # RESTORE # In[8]: if do_train == 0:", "= sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 =", "in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th pool\") plt.colorbar() plt.show()", "\"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # # RESTORE # In[8]: if do_train", "print (\"Size of 'dense' is %s\" % (dense.shape,)) # Let's see 'out' print", "= sess.run(conv_out['dense'], feed_dict={x: trainimg[0:1, :]}) out = sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # #", "if do_train == 0: epoch = training_epochs-1 saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK", "'wc1' is %s\" % (wc1.shape,)) # Plot ! for i in range(3): plt.matshow(wc1[:,", "# Plot ! 
for i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i)", "sess.run(init) # In[7]: if do_train == 1: for epoch in range(training_epochs): avg_cost =", "1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\") #", "tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1,", "is optional _pred = conv_simple(x, weights, biases)['out'] cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(_pred, y)) optm =", "n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate = 0.001 training_epochs =", "# Conv2 (+bias) # In[12]: # Let's see 'conv2' print (\"Size of 'conv2'", "'bc1': tf.Variable(tf.random_normal([64], stddev=0.1)), 'bd1': tf.Variable(tf.random_normal([n_output], stddev=0.1)) } def conv_simple(_input, _w, _b): # Reshape", "trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1,", "y: batch_ys})/total_batch # Display logs per epoch step if epoch % display_step ==", "% (conv1.shape,)) # Plot ! for i in range(3): plt.matshow(conv1[0, :, :, i],", "tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES", "print (\"PACKAGES LOADED\") # # LOAD MNIST # In[2]: mnist = input_data.read_data_sets('data/', one_hot=True)", "matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") #", "trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1,", "! 
for i in range(3): plt.matshow(conv1[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th", "saver.save(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"Optimization Finished.\") # # RESTORE # In[8]: if", "from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST", "is %s\" % (wc1.shape,)) # Plot ! for i in range(3): plt.matshow(wc1[:, :,", "training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy:", "return out print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]: #", "= conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'], feed_dict={x: trainimg[0:1, :]}) conv1 = sess.run(conv_out['conv1'],", "= sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]}) conv3 =", "Plot ! for i in range(3): plt.matshow(pool[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) +", "WORKS # In[9]: with tf.device(device_type): conv_out = conv_simple(x, weights, biases) input_r = sess.run(conv_out['input_r'],", "i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv1\") plt.colorbar() plt.show() # # Conv2 (+bias) #", "saver.restore(sess, \"nets/cnn_mnist_simple.ckpt-\" + str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE HOW CNN", "# # Conv3 (ReLU) # In[13]: # Let's see 'conv3' print (\"Size of", "100 display_step = 1 # Functions! with tf.device(device_type): # <= This is optional", "str(epoch)) print (\"NETWORK RESTORED\") # # LET'S SEE HOW CNN WORKS # In[9]:", "print (\"Label is %d\" % (label)) # Plot ! 
plt.matshow(input_r[0, :, :, 0],", "# Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Add-bias", "2, 1], padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer", "# # Conv2 (+bias) # In[12]: # Let's see 'conv2' print (\"Size of", "this image is \" + str(label) + \"\") plt.colorbar() plt.show() # # Conv1", "(epoch, training_epochs, avg_cost)) train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training", "import input_data get_ipython().run_line_magic('matplotlib', 'inline') print (\"PACKAGES LOADED\") # # LOAD MNIST # In[2]:", "} return out print (\"CNN ready\") # # DEFINE COMPUTATIONAL GRAPH # In[5]:", "sess.run(conv_out['out'], feed_dict={x: trainimg[0:1, :]}) # # Input # In[10]: # Let's see 'input_r'", "plt.show() # # Conv2 (+bias) # In[12]: # Let's see 'conv2' print (\"Size", "of this image is \" + str(label) + \"\") plt.colorbar() plt.show() # #", "= tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_output]) # Parameters learning_rate =", "(\"Size of 'conv2' is %s\" % (conv2.shape,)) # Plot ! for i in", "plt.show() # # Dense # In[15]: # Let's see 'dense' print (\"Size of", "= tf.nn.bias_add(_conv1, _b['bc1']) # Pass ReLu _conv3 = tf.nn.relu(_conv2) # Max-pooling _pool =", "testlabel = mnist.test.labels print (\"MNIST ready\") # # SELECT DEVICE TO BE USED", "_input_r, 'conv1': _conv1, 'conv2': _conv2, 'conv3': _conv3 , 'pool': _pool, 'dense': _dense, 'out':", "# Let's see 'conv1' print (\"Size of 'conv1' is %s\" % (conv1.shape,)) #", "+ \"th conv2\") plt.colorbar() plt.show() # # Conv3 (ReLU) # In[13]: # Let's", "## SIMPLE CONVOLUTIONAL NEURAL NETWORK # In[1]: import numpy as np import tensorflow", "(\"Size of 'wc1' is %s\" % (wc1.shape,)) # Plot ! 
for i in", ":]}) conv1 = sess.run(conv_out['conv1'], feed_dict={x: trainimg[0:1, :]}) conv2 = sess.run(conv_out['conv2'], feed_dict={x: trainimg[0:1, :]})", "batch_size = 100 display_step = 1 # Functions! with tf.device(device_type): # <= This", "print (\"Size of 'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :]) print", "CNN # In[4]: with tf.device(device_type): # <= This is optional n_input = 784", "+ \"th conv3\") plt.colorbar() plt.show() # # Pool (max_pool) # In[14]: # Let's", "Conv3 (ReLU) # In[13]: # Let's see 'conv3' print (\"Size of 'conv3' is", "= mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels print (\"MNIST ready\") # #", "In[6]: do_train = 1 sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train ==", "Ready to Go!\") # # OPTIMIZE # ## DO TRAIN OR NOT #", "'input_r' print (\"Size of 'input_r' is %s\" % (input_r.shape,)) label = np.argmax(trainlabel[0, :])", "padding='SAME') # Vectorize _dense = tf.reshape(_pool, [-1, _w['wd1'].get_shape().as_list()[0]]) # Fully-connected layer _out =", "sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(init) # In[7]: if do_train == 1: for epoch in", "_input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Convolution _conv1 = tf.nn.conv2d(_input_r, _w['wc1'],", "tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data get_ipython().run_line_magic('matplotlib', 'inline')", "if do_train == 1: for epoch in range(training_epochs): avg_cost = 0. total_batch =", "# Let's see 'out' print (\"Size of 'out' is %s\" % (out.shape,)) #", "plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show() # #", "Functions! 
with tf.device(device_type): # <= This is optional _pred = conv_simple(x, weights, biases)['out']", "in range(3): plt.matshow(conv2[0, :, :, i], cmap=plt.get_cmap('gray')) plt.title(str(i) + \"th conv2\") plt.colorbar() plt.show()", "= tf.nn.max_pool(_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Vectorize", "with tf.device(device_type): # <= This is optional _pred = conv_simple(x, weights, biases)['out'] cost", "is %s\" % (conv3.shape,)) # Plot ! for i in range(3): plt.matshow(conv3[0, :,", "+ str(label) + \"\") plt.colorbar() plt.show() # # Conv1 (convolution) # In[11]: #", "tf.reduce_mean(tf.cast(_corr, tf.float32)) # Accuracy init = tf.initialize_all_variables() # Saver save_step = 1; savedir", "= 1; savedir = \"nets/\" saver = tf.train.Saver(max_to_keep=3) print (\"Network Ready to Go!\")", "conv3 = sess.run(conv_out['conv3'], feed_dict={x: trainimg[0:1, :]}) pool = sess.run(conv_out['pool'], feed_dict={x: trainimg[0:1, :]}) dense", "sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print (\" Training accuracy: %.3f\" % (train_acc)) test_acc", "= 10 weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64], stddev=0.1)), 'wd1': tf.Variable(tf.random_normal([14*14*64,", "1: for epoch in range(training_epochs): avg_cost = 0. total_batch = int(mnist.train.num_examples/batch_size) # Loop" ]
[ "QtGui, QtCore from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle", "QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL(", "self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'),", "__init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3", "== '__main__': import sys app = QtGui.QApplication(sys.argv) av = Analise_View() av.add_widgets() av.showMaximized() sys.exit(app.exec_())", "self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left", "= QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo)", "Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count", "self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), 
self, self.controle.decrementa_view)", "QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv)", "'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import sys app", "QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right =", "3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end =", "class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self)", "self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self,", "Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"),", "PyQt4 import QtGui, QtCore from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle):", "QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view)", "QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, 
self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider,", "self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL(", "self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab)", "widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui", "Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window()", "import QtGui, QtCore from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self)", "from PyQt4 import QtGui, QtCore from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self,", "self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes,", "self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), 
self.controle.set_current_tab) if __name__ == '__main__': import sys app =", "self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home", "if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) av = Analise_View() av.add_widgets()", "__name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) av = Analise_View() av.add_widgets() av.showMaximized()", "= controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self,", "self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"),", "self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL(", "QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import", "self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home =", "QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 
'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if", "QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view)", "<filename>src/view/view_analisys.py from PyQt4 import QtGui, QtCore from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def", "def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count =", "self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"),", "= 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end", "= QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self,", "self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'),", "'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), 
self.controle.set_current_tab) if __name__ ==", "QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) av", "from widgets.window_analisys import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle", "= QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs,", "self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__':", "QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__", "= Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view) self.shortcut_left =", "self.controle.set_current_tab) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) av = Analise_View()", "self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view)", "QtCore from widgets.window_analisys import 
Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle =", "QtCore.SIGNAL( 'valueChanged(int)'), self.controle.set_view) QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL( 'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import sys", "self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"),", "self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self, self.controle.first_view) QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL( 'triggered()'), self.controle.abrir_arquivo) QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL( 'valueChanged(int)'),", "'currentChanged(int)'), self.controle.set_current_tab) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) av =", "controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence(\"l\"), self, self.controle.incrementa_view)", "controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui = Analysis_Window() self.ui.setup(self) self.count = 3 self.shortcut_right", "= QtGui.QShortcut(QtGui.QKeySequence(\"j\"), self, self.controle.decrementa_view) self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence(\"end\"), self, self.controle.last_view) self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence(\"home\"), self,", "import Analysis_Window class Analise_View(QtGui.QMainWindow): def __init__(self, controle): QtGui.QMainWindow.__init__(self) self.controle = controle self.ui =" ]
[ "input('Digite se nome: ') cont = 0 while cont < 10: print(f'Hello word", "<reponame>DouglasCarvalhoPereira/Interact-OS-PYTHON<filename>M1/Aula1.py name = input('Digite se nome: ') cont = 0 while cont <", "= input('Digite se nome: ') cont = 0 while cont < 10: print(f'Hello", "nome: ') cont = 0 while cont < 10: print(f'Hello word {name}') cont+=1", "se nome: ') cont = 0 while cont < 10: print(f'Hello word {name}')", "name = input('Digite se nome: ') cont = 0 while cont < 10:" ]
[ "0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding =", "8 self.padding = 10 self.cols = 8 self.rows = 5 for i in", "__init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3 for i in range(self.cols", "class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols", "import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard", "__init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols = 8 self.rows", "from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard", "class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class", "i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App):", "ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen):", "pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass", "self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help'))", "self.cols = 3 self.rows = 3 for i in range(self.cols * self.rows): button", "height=20)) class SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) 
sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current", "sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm if __name__ == '__main__': SudokuApp().run()", "ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm if __name__ ==", "class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3 for", "SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu'", "import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen):", "import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import", "3 for i in range(self.cols * self.rows): button = Button(text=str(i + 1)) button.font_size", "from sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass", "= 3 for i in range(self.cols * self.rows): button = Button(text=str(i + 1))", "= 8 self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20,", "self.cols = 8 self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None),", "self.rows): button = Button(text=str(i + 1)) button.font_size = button.height * 0.25 self.add_widget(button) class", "from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass class", "class SudokuApp(App): def build(self): sm = 
ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current =", "**kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols = 8 self.rows =", "pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def", "import App from kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.checkbox import", "self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm", "kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from", "def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols = 8", "pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs)", "None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits'))", "class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows", "1)) button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs)", "= Button(text=str(i + 1)) button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def", "import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen):", "= 3 
self.rows = 3 for i in range(self.cols * self.rows): button =", "from kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from", "= 8 self.padding = 10 self.cols = 8 self.rows = 5 for i", "SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass", "5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20))", "def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3 for i in", "super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols = 8 self.rows = 5", "Button(text=str(i + 1)) button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self,", "for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class", "class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self,", "= button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing =", "10 self.cols = 8 self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None,", "MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows =", "build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm", "* 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 
self.padding", "button = Button(text=str(i + 1)) button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout):", "App from kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox", "range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self):", "from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from", "def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return", "class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class", "= ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm if __name__", "self.spacing = 8 self.padding = 10 self.cols = 8 self.rows = 5 for", "* self.rows): button = Button(text=str(i + 1)) button.font_size = button.height * 0.25 self.add_widget(button)", "CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen,", "import kivy kivy.require('1.10.0') from kivy.app import App from kivy.uix.label import Label from kivy.uix.button", "Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout", "**kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3 for i in range(self.cols *", "Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class", 
"HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs):", "self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10", "self.padding = 10 self.cols = 8 self.rows = 5 for i in range(20):", "None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm = ScreenManager()", "from kivy.app import App from kivy.uix.label import Label from kivy.uix.button import Button from", "GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass", "kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout", "import Label from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import", "size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm =", "in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def", "kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard class", "pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3", "GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen):", "8 self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20))", "kivy.require('1.10.0') from kivy.app 
import App from kivy.uix.label import Label from kivy.uix.button import Button", "CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols =", "i in range(self.cols * self.rows): button = Button(text=str(i + 1)) button.font_size = button.height", "Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3 for i", "SolverScreen(Screen): pass class HelpScreen(Screen): pass class CreditsScreen(Screen): pass class MenuScreen(Screen): pass class Numpad(GridLayout):", "super().__init__(**kwargs) self.cols = 3 self.rows = 3 for i in range(self.cols * self.rows):", "sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm if __name__ == '__main__':", "= 10 self.cols = 8 self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True,", "DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import", "range(self.cols * self.rows): button = Button(text=str(i + 1)) button.font_size = button.height * 0.25", "3 self.rows = 3 for i in range(self.cols * self.rows): button = Button(text=str(i", "in range(self.cols * self.rows): button = Button(text=str(i + 1)) button.font_size = button.height *", "kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager", "button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing", "class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols", "width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class 
SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game'))", "kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown", "kivy.app import App from kivy.uix.label import Label from kivy.uix.button import Button from kivy.uix.checkbox", "self.rows = 3 for i in range(self.cols * self.rows): button = Button(text=str(i +", "kivy kivy.require('1.10.0') from kivy.app import App from kivy.uix.label import Label from kivy.uix.button import", "ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8 self.padding = 10 self.cols =", "button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.spacing = 8", "height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60, height=20)) class SudokuApp(App): def build(self): sm = ScreenManager() sm.add_widget(MenuScreen(name='menu'))", "Label from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown", "= 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5, None),size_hint_max_x=60,", "sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(GameScreen(name='game')) sm.add_widget(HelpScreen(name='help')) sm.add_widget(CreditsScreen(name='credits')) sm.current = 'menu' return sm if", "kivy.uix.screenmanager import Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen):", "sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass class HelpScreen(Screen): pass class", "+ 1)) button.font_size = button.height * 0.25 self.add_widget(button) class ColorFilter(GridLayout): def __init__(self, **kwargs):", 
"self.rows = 5 for i in range(20): self.add_widget(CheckBox(active=True, size_hint=(None, None), height=20, width=20)) self.add_widget(Button(size_hint=(0.5,", "pass class Numpad(GridLayout): def __init__(self, **kwargs): super().__init__(**kwargs) self.cols = 3 self.rows = 3", "import Screen, ScreenManager from sudokuboard import SudokuBoard class GameScreen(Screen): pass class SolverScreen(Screen): pass", "from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import GridLayout from kivy.uix.screenmanager import Screen, ScreenManager", "import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from kivy.uix.gridlayout import", "from kivy.uix.button import Button from kivy.uix.checkbox import CheckBox from kivy.uix.dropdown import DropDown from", "for i in range(self.cols * self.rows): button = Button(text=str(i + 1)) button.font_size =" ]
[ "src=args) # Initialize and return the module instance = cls(**dct) if hasattr(instance, '__post_init__'):", "are two or greater params to be received assert len(call_params) == 2,('__call__ must", "an interface via argparse. ''' epilog = None if hasattr(cls, 'contributors'): # ==========================", "vars(args) # Initialize a dictionary to hold all of the necessary argument #", "if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog =", "authentication and update success to True if successful if success: return dict(outcome=1,username=username,password=password) else:", "ref in references: epilog += f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS", "''' Args: func: Function/method from which the signature will be sourced. src: Source", "underlying brute force module. ''' print('Initializing module!') # Translate the argparse arguments to", "that it's declared assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__ method", "if not isinstance(additional, dict): raise ValueError( 'additional field of contributor records ' f'must", "argument # to initialize the brute force module. dct = bindSignatureArgs(func=cls.__init__, src=args) #", "def build_interface(cls, subparsers: 'Argparse subparsers that will receive the subcommand') \\ -> argparse.ArgumentParser:", "update success to True if successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password,", "help menu brief_description = None # Description of the module that'll be shown", "src set in dest. ''' dest = {} # Iterate over paramaters and", "{type(additional)}') for k,v in additional.items(): epilog += f'\\n {k}: {v}' epilog += '\\n'", "' \\ 'least two arguments: username, password') # Ensure that the first two", "argparse. 
''' epilog = None if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE", "if not name: raise ValueError( 'contributor records must have a \"name\" field') epilog", "= None # Description of the module that'll be shown in the interface", "that'll be shown in logging name = None # Brief description to display", "when an upstream server needs to be targeted. # The __call__ Method This", "records ' f'must be a dict, not {type(additional)}') for k,v in additional.items(): epilog", "a module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that", "'username' and 'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must receive the first", "values to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog)", "like: ``` def __call__(self, username, password, *args, **kwargs): success = False # Do", "import argparse import inspect import re def bindSignatureArgs(func, src:dict) -> dict: ''' Args:", "method is called for each authentication attempt by BruteLoops and should check the", "to True if successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note", "method signature must look like: ``` def __call__(self, username, password, *args, **kwargs): success", "def initialize(cls, args): '''Initialize and return the underlying brute force module. ''' print('Initializing", "bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module instance = cls(**dct) if hasattr(instance,", "sourced. src: Source dictionary that will provide values for dest. 
Returns: A new", "== 2,('__call__ must receive at ' \\ 'least two arguments: username, password') #", "{call_params}') @classmethod def get_handle(cls): '''Return a simple string to use as a module", "dict): raise ValueError( 'additional field of contributor records ' f'must be a dict,", "assert ['username','password'] == call_params,('__call__ ' \\ 'must receive the first two arguments as", "arguments to a dictionary args = vars(args) # Initialize a dictionary to hold", "inspect.signature(func).parameters.items(): # Skip \"self\" references if k == 'self': continue # Extract the", "{k}: {v}' epilog += '\\n' if hasattr(cls, 'references'): # ======================== # FORMAT MODULE", "cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls):", "that the first two are 'username' and 'password' assert ['username','password'] == call_params,('__call__ '", "help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True, help='Database to target.') return parser", "parser for the command assoicated with the newly created module. This is how", "server needs to be targeted. # The __call__ Method This method is called", "username, ' \\ f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return a simple", "module. 
dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module instance =", "value when provided if k in src: dest[k]=src[k] # Use the default value", "k == 'self': continue # Extract the user supplied value when provided if", "''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True,", "signature will be sourced. src: Source dictionary that will provide values for dest.", "to iterate over each parameter declared in __init__ and build an interface via", "{type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor records", "= cont.get('name') additional = cont.get('additional') if not name: raise ValueError( 'contributor records must", "is how we bind the name that the user will refernce at the", "authentication was successful, indicating valid credentials: 1 means success, 0 means failure. '''", "the module that'll be shown in logging name = None # Brief description", "the commandline, along with providing a mechanism to assign values to module parameters.", "of a username and password. The method signature must look like: ``` def", "paramaters and values in the function's # signature for k,v in inspect.signature(func).parameters.items(): #", "for brute force modules. It builds the interface subcommands by inspecting the __init__", "provide values for dest. Returns: A new dictionary with argument values from src", "in logging name = None # Brief description to display in the help", "username and password. 
The method signature must look like: ``` def __call__(self, username,", "the module that'll be shown in the interface description = None @classmethod def", "Use the default value other wise else: dest[k]=v return dest class Module: '''#", "is called for each authentication attempt by BruteLoops and should check the validity", "mechanism to assign values to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description,", "''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will receive the", "by inspecting the __init__ method while also enforcing restrictions on the __call__ method", "logging name = None # Brief description to display in the help menu", "list): raise ValueError( f'References must be a list, got {type(references)}') for ref in", "MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError(", "command assoicated with the newly created module. This is how we bind the", "valid credentials: 1 means success, 0 means failure. ''' # Name for the", "will refernce at the commandline, along with providing a mechanism to assign values", "to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls)", "argparse argument parser for the command assoicated with the newly created module. This", "re def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method from which the", "are 'username' and 'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must receive the", "Note the structure returned in the declaration above. 
The leading integer value determines", "# Initialize a dictionary to hold all of the necessary argument # to", "records must be dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional')", "This method is called for each authentication attempt by BruteLoops and should check", "integer value determines if authentication was successful, indicating valid credentials: 1 means success,", "============================== # VALIDATING THE __call__ METHOD # ============================== # Ensure that it's declared", "======================== epilog += f'\\nReferences:\\n' references = cls.references if not isinstance(references, list): raise ValueError(", "method while also enforcing restrictions on the __call__ method to ensure BruteLoops can", "It's useful in situations when an upstream server needs to be targeted. #", "isinstance(cont, dict): raise ValueError( 'contributor records must be dictionaries, ' f'not {type(cont)}') name", "== 'self': continue # Extract the user supplied value when provided if k", "{} # Iterate over paramaters and values in the function's # signature for", "return instance @classmethod def validate(cls): # ============================== # VALIDATING THE __call__ METHOD #", "def get_handle(cls): '''Return a simple string to use as a module identifier. 
'''", "to be received assert len(call_params) == 2,('__call__ must receive at ' \\ 'least", "Initialize a dictionary to hold all of the necessary argument # to initialize", "in the help menu brief_description = None # Description of the module that'll", "k in src: dest[k]=src[k] # Use the default value other wise else: dest[k]=v", "= bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module instance = cls(**dct) if", "not: {call_params}') @classmethod def get_handle(cls): '''Return a simple string to use as a", "+= f'\\nReferences:\\n' references = cls.references if not isinstance(references, list): raise ValueError( f'References must", "'references'): # ======================== # FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references", "REFERENCES # ======================== epilog += f'\\nReferences:\\n' references = cls.references if not isinstance(references, list):", "@classmethod def initialize(cls, args): '''Initialize and return the underlying brute force module. '''", "will be sourced. src: Source dictionary that will provide values for dest. Returns:", "The __init__ Method This method can be used to set static values supporting", "epilog += f'\\n- {name}' if additional: if not isinstance(additional, dict): raise ValueError( 'additional", "if not isinstance(references, list): raise ValueError( f'References must be a list, got {type(references)}')", "__call__ method to ensure BruteLoops can make authentication callbacks. # The __init__ Method", "to set static values supporting a brute force module. It's useful in situations", "The __call__ Method This method is called for each authentication attempt by BruteLoops", "call_params,('__call__ ' \\ 'must receive the first two arguments as username, ' \\", "over paramaters and values in the function's # signature for k,v in inspect.signature(func).parameters.items():", "necessary argument # to initialize the brute force module. 
dct = bindSignatureArgs(func=cls.__init__, src=args)", "user will refernce at the commandline, along with providing a mechanism to assign", "authentication attempt by BruteLoops and should check the validity of a username and", "be used to set static values supporting a brute force module. It's useful", "to hold all of the necessary argument # to initialize the brute force", "list of dictionary ' f'values, not {type(cls.contributors)}') for cont in cls.contributors: if not", "force modules. It builds the interface subcommands by inspecting the __init__ method while", "create a new argparse argument parser for the command assoicated with the newly", "f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional') if not name: raise ValueError(", "failure. ''' # Name for the module that'll be shown in logging name", "password. The method signature must look like: ``` def __call__(self, username, password, *args,", "of the necessary argument # to initialize the brute force module. dct =", "def validate(cls): # ============================== # VALIDATING THE __call__ METHOD # ============================== # Ensure", "inspecting the __init__ method while also enforcing restrictions on the __call__ method to", "= list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] == 'self': call_params = call_params[1:3]", "the name that the user will refernce at the commandline, along with providing", "be a list of dictionary ' f'values, not {type(cls.contributors)}') for cont in cls.contributors:", "along with providing a mechanism to assign values to module parameters. 
''' parser", "This is how we bind the name that the user will refernce at", "that the user will refernce at the commandline, along with providing a mechanism", "METHOD # ============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be callable.", "'Argparse subparsers that will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect", "# Ensure there are two or greater params to be received assert len(call_params)", "__init__ and build an interface via argparse. ''' epilog = None if hasattr(cls,", "it's declared assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__ method on", "epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be", "2,('__call__ must receive at ' \\ 'least two arguments: username, password') # Ensure", "A new dictionary with argument values from src set in dest. ''' dest", "over each parameter declared in __init__ and build an interface via argparse. '''", "in __init__ and build an interface via argparse. ''' epilog = None if", "This method can be used to set static values supporting a brute force", "restrictions on the __call__ method to ensure BruteLoops can make authentication callbacks. 
#", "Name for the module that'll be shown in logging name = None #", "the default value other wise else: dest[k]=v return dest class Module: '''# Base", "@classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will receive the subcommand') \\ ->", "if k in src: dest[k]=src[k] # Use the default value other wise else:", "dest[k]=v return dest class Module: '''# Base Module Class This class serves as", "list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] == 'self': call_params = call_params[1:3] #", "'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be a list", "at ' \\ 'least two arguments: username, password') # Ensure that the first", "be received assert len(call_params) == 2,('__call__ must receive at ' \\ 'least two", "module that'll be shown in the interface description = None @classmethod def initialize(cls,", "value determines if authentication was successful, indicating valid credentials: 1 means success, 0", "if authentication was successful, indicating valid credentials: 1 means success, 0 means failure.", "two arguments as username, ' \\ f'password -- not: {call_params}') @classmethod def get_handle(cls):", "a dictionary args = vars(args) # Initialize a dictionary to hold all of", "an upstream server needs to be targeted. 
# The __call__ Method This method", "= 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be a", "inspect import re def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method from", "dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module instance = cls(**dct)", "if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned in", "provided if k in src: dest[k]=src[k] # Use the default value other wise", "module!') # Translate the argparse arguments to a dictionary args = vars(args) #", "first two arguments as username, ' \\ f'password -- not: {call_params}') @classmethod def", "dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned in the declaration above.", "= cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def", "as username, ' \\ f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return a", "import inspect import re def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method", "be dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional') if not", "the underlying brute force module. ''' print('Initializing module!') # Translate the argparse arguments", "authentication callbacks. 
# The __init__ Method This method can be used to set", "' f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional') if not name: raise", "MODULE ARGUMENTS # ====================== '''Here we create a new argparse argument parser for", "interface subcommands by inspecting the __init__ method while also enforcing restrictions on the", "# Description of the module that'll be shown in the interface description =", "supplied value when provided if k in src: dest[k]=src[k] # Use the default", "module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database',", "parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True, help='Database", "# Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be callable. 
' 'Declare a", "return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned in the declaration", "'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must receive the first two arguments", "how we bind the name that the user will refernce at the commandline,", "isinstance(references, list): raise ValueError( f'References must be a list, got {type(references)}') for ref", "print('Initializing module!') # Translate the argparse arguments to a dictionary args = vars(args)", "description = None @classmethod def initialize(cls, args): '''Initialize and return the underlying brute", "# Ensure that the first two are 'username' and 'password' assert ['username','password'] ==", "return dest class Module: '''# Base Module Class This class serves as a", "# Use the default value other wise else: dest[k]=v return dest class Module:", "the help menu brief_description = None # Description of the module that'll be", "module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance", "in additional.items(): epilog += f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls, 'references'):", "src:dict) -> dict: ''' Args: func: Function/method from which the signature will be", "dest class Module: '''# Base Module Class This class serves as a template", "@classmethod def get_handle(cls): '''Return a simple string to use as a module identifier.", "k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if k == 'self': continue #", "build an interface via argparse. 
''' epilog = None if hasattr(cls, 'contributors'): #", "'self': call_params = call_params[1:3] # Ensure there are two or greater params to", "'Module contributors must be a list of dictionary ' f'values, not {type(cls.contributors)}') for", "two are 'username' and 'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must receive", "and should check the validity of a username and password. The method signature", "if successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure", "receive the first two arguments as username, ' \\ f'password -- not: {call_params}')", "dest. ''' dest = {} # Iterate over paramaters and values in the", "class Module: '''# Base Module Class This class serves as a template for", "def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method from which the signature", "'__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): # ============================== #", "references if k == 'self': continue # Extract the user supplied value when", "raise ValueError( 'contributor records must have a \"name\" field') epilog += f'\\n- {name}'", "\\ .keys()) if call_params and call_params[0] == 'self': call_params = call_params[1:3] # Ensure", "will provide values for dest. Returns: A new dictionary with argument values from", "cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor records must be dictionaries, '", "while also enforcing restrictions on the __call__ method to ensure BruteLoops can make", "also enforcing restrictions on the __call__ method to ensure BruteLoops can make authentication", "with the newly created module. 
This is how we bind the name that", "def __call__(self, username, password, *args, **kwargs): success = False # Do authentication and", "to a dictionary args = vars(args) # Initialize a dictionary to hold all", "argument values from src set in dest. ''' dest = {} # Iterate", "dictionary args = vars(args) # Initialize a dictionary to hold all of the", "must have a \"name\" field') epilog += f'\\n- {name}' if additional: if not", "= False # Do authentication and update success to True if successful if", "for cont in cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor records must", "func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): # ============================== # VALIDATING THE __call__", "\\ 'least two arguments: username, password') # Ensure that the first two are", "cont.get('name') additional = cont.get('additional') if not name: raise ValueError( 'contributor records must have", "list, got {type(references)}') for ref in references: epilog += f'\\n- {ref}' # ======================", "be sourced. src: Source dictionary that will provide values for dest. Returns: A", "dict(outcome=0,username=username,password=password, ``` Note the structure returned in the declaration above. The leading integer", "module: ' \\ f'{cls.get_handle}') # Get a list of parameter names call_params =", "which the signature will be sourced. src: Source dictionary that will provide values", "there are two or greater params to be received assert len(call_params) == 2,('__call__", "Ensure there are two or greater params to be received assert len(call_params) ==", "= cls.references if not isinstance(references, list): raise ValueError( f'References must be a list,", "Method This method can be used to set static values supporting a brute", "VALIDATING THE __call__ METHOD # ============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules", "set in dest. 
''' dest = {} # Iterate over paramaters and values", "a list of dictionary ' f'values, not {type(cls.contributors)}') for cont in cls.contributors: if", "on the __call__ method to ensure BruteLoops can make authentication callbacks. # The", "success = False # Do authentication and update success to True if successful", "Description of the module that'll be shown in the interface description = None", "values in the function's # signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\"", "*args, **kwargs): success = False # Do authentication and update success to True", "['username','password'] == call_params,('__call__ ' \\ 'must receive the first two arguments as username,", "========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must", "\"self\" references if k == 'self': continue # Extract the user supplied value", "# Skip \"self\" references if k == 'self': continue # Extract the user", "to be targeted. # The __call__ Method This method is called for each", "continue # Extract the user supplied value when provided if k in src:", "or greater params to be received assert len(call_params) == 2,('__call__ must receive at", "each parameter declared in __init__ and build an interface via argparse. ''' epilog", "cont.get('additional') if not name: raise ValueError( 'contributor records must have a \"name\" field')", "in the function's # signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references", "menu brief_description = None # Description of the module that'll be shown in", "list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0]", "= call_params[1:3] # Ensure there are two or greater params to be received", "use as a module identifier. 
''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse", "from which the signature will be sourced. src: Source dictionary that will provide", "the function's # signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if", "+= '\\n' if hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES # ========================", "initialize the brute force module. dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return", "references: epilog += f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS # ======================", "a brute force module. It's useful in situations when an upstream server needs", "of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] ==", "= None # Brief description to display in the help menu brief_description =", "dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional') if not name:", "the inspect module to iterate over each parameter declared in __init__ and build", "== call_params,('__call__ ' \\ 'must receive the first two arguments as username, '", "argparse.ArgumentParser: '''Use the inspect module to iterate over each parameter declared in __init__", "a __call__ method on the module: ' \\ f'{cls.get_handle}') # Get a list", "be a dict, not {type(additional)}') for k,v in additional.items(): epilog += f'\\n {k}:", "' \\ 'must receive the first two arguments as username, ' \\ f'password", "class serves as a template for brute force modules. 
It builds the interface", "hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n'", "# ====================== '''Here we create a new argparse argument parser for the command", "bind the name that the user will refernce at the commandline, along with", "assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__ method on the module:", "args = vars(args) # Initialize a dictionary to hold all of the necessary", "commandline, along with providing a mechanism to assign values to module parameters. '''", "be shown in logging name = None # Brief description to display in", "Returns: A new dictionary with argument values from src set in dest. '''", "providing a mechanism to assign values to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(),", "not isinstance(additional, dict): raise ValueError( 'additional field of contributor records ' f'must be", "``` Note the structure returned in the declaration above. The leading integer value", "``` def __call__(self, username, password, *args, **kwargs): success = False # Do authentication", "if call_params and call_params[0] == 'self': call_params = call_params[1:3] # Ensure there are", "the validity of a username and password. The method signature must look like:", "and return the underlying brute force module. ''' print('Initializing module!') # Translate the", "be a list, got {type(references)}') for ref in references: epilog += f'\\n- {ref}'", "assoicated with the newly created module. This is how we bind the name", "a mechanism to assign values to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description,", "# ============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be callable. 
'", "ValueError( 'Module contributors must be a list of dictionary ' f'values, not {type(cls.contributors)}')", "upstream server needs to be targeted. # The __call__ Method This method is", "else: dest[k]=v return dest class Module: '''# Base Module Class This class serves", "value other wise else: dest[k]=v return dest class Module: '''# Base Module Class", "module. It's useful in situations when an upstream server needs to be targeted.", "in the declaration above. The leading integer value determines if authentication was successful,", "' 'Declare a __call__ method on the module: ' \\ f'{cls.get_handle}') # Get", "f'values, not {type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont, dict): raise ValueError(", "__init__ Method This method can be used to set static values supporting a", "module that'll be shown in logging name = None # Brief description to", "# The __init__ Method This method can be used to set static values", "# Do authentication and update success to True if successful if success: return", "successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned", "ValueError( 'additional field of contributor records ' f'must be a dict, not {type(additional)}')", "receive at ' \\ 'least two arguments: username, password') # Ensure that the", "{ref}' # ====================== # BUILD MODULE ARGUMENTS # ====================== '''Here we create a", "check the validity of a username and password. The method signature must look", "# ========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not", "that will provide values for dest. 
Returns: A new dictionary with argument values", "========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors,", "isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be a list of dictionary '", "if not isinstance(cont, dict): raise ValueError( 'contributor records must be dictionaries, ' f'not", "''' dest = {} # Iterate over paramaters and values in the function's", "the first two are 'username' and 'password' assert ['username','password'] == call_params,('__call__ ' \\", "records must have a \"name\" field') epilog += f'\\n- {name}' if additional: if", "name = cont.get('name') additional = cont.get('additional') if not name: raise ValueError( 'contributor records", "for k,v in additional.items(): epilog += f'\\n {k}: {v}' epilog += '\\n' if", "on the module: ' \\ f'{cls.get_handle}') # Get a list of parameter names", "raise ValueError( f'References must be a list, got {type(references)}') for ref in references:", "in the interface description = None @classmethod def initialize(cls, args): '''Initialize and return", "f'\\n- {name}' if additional: if not isinstance(additional, dict): raise ValueError( 'additional field of", "+= f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls, 'references'): # ======================== #", "must receive at ' \\ 'least two arguments: username, password') # Ensure that", "above. The leading integer value determines if authentication was successful, indicating valid credentials:", "''' print('Initializing module!') # Translate the argparse arguments to a dictionary args =", "return dict(outcome=0,username=username,password=password, ``` Note the structure returned in the declaration above. 
The leading", "# FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list):", "# Extract the user supplied value when provided if k in src: dest[k]=src[k]", "The leading integer value determines if authentication was successful, indicating valid credentials: 1", "to display in the help menu brief_description = None # Description of the", "return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will receive the subcommand')", "references = cls.references if not isinstance(references, list): raise ValueError( f'References must be a", "{type(references)}') for ref in references: epilog += f'\\n- {ref}' # ====================== # BUILD", "callable. ' 'Declare a __call__ method on the module: ' \\ f'{cls.get_handle}') #", "Module Class This class serves as a template for brute force modules. It", "declared assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__ method on the", "must look like: ``` def __call__(self, username, password, *args, **kwargs): success = False", "to use as a module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers:", "epilog = None if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS #", "the newly created module. This is how we bind the name that the", "# Initialize and return the module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__(", "assign values to module parameters. 
''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter,", "return the module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args))", "\\ 'must receive the first two arguments as username, ' \\ f'password --", "This class serves as a template for brute force modules. It builds the", "None @classmethod def initialize(cls, args): '''Initialize and return the underlying brute force module.", "newly created module. This is how we bind the name that the user", "dictionary that will provide values for dest. Returns: A new dictionary with argument", "from src set in dest. ''' dest = {} # Iterate over paramaters", "the structure returned in the declaration above. The leading integer value determines if", "None # Brief description to display in the help menu brief_description = None", "\\ f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return a simple string to", "additional.items(): epilog += f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls, 'references'): #", "parameter declared in __init__ and build an interface via argparse. ''' epilog =", "in dest. ''' dest = {} # Iterate over paramaters and values in", "can be used to set static values supporting a brute force module. It's", "hold all of the necessary argument # to initialize the brute force module.", "# Iterate over paramaters and values in the function's # signature for k,v", "shown in the interface description = None @classmethod def initialize(cls, args): '''Initialize and", "call_params = call_params[1:3] # Ensure there are two or greater params to be", "src: Source dictionary that will provide values for dest. Returns: A new dictionary", "callbacks. 
# The __init__ Method This method can be used to set static", "f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return a simple string to use", "+= f'\\n- {name}' if additional: if not isinstance(additional, dict): raise ValueError( 'additional field", "and update success to True if successful if success: return dict(outcome=1,username=username,password=password) else: return", "f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls, 'references'): # ======================== # FORMAT", "# Brief description to display in the help menu brief_description = None #", "not {type(additional)}') for k,v in additional.items(): epilog += f'\\n {k}: {v}' epilog +=", "ValueError( f'References must be a list, got {type(references)}') for ref in references: epilog", "to ensure BruteLoops can make authentication callbacks. # The __init__ Method This method", "arguments as username, ' \\ f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return", "f'References must be a list, got {type(references)}') for ref in references: epilog +=", "parameters. 
''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db',", "builds the interface subcommands by inspecting the __init__ method while also enforcing restrictions", "====================== # BUILD MODULE ARGUMENTS # ====================== '''Here we create a new argparse", "params to be received assert len(call_params) == 2,('__call__ must receive at ' \\", "brief_description = None # Description of the module that'll be shown in the", "interface description = None @classmethod def initialize(cls, args): '''Initialize and return the underlying", "and call_params[0] == 'self': call_params = call_params[1:3] # Ensure there are two or", "the __call__ method to ensure BruteLoops can make authentication callbacks. # The __init__", "argparse arguments to a dictionary args = vars(args) # Initialize a dictionary to", "must be a list of dictionary ' f'values, not {type(cls.contributors)}') for cont in", "ensure BruteLoops can make authentication callbacks. # The __init__ Method This method can", "serves as a template for brute force modules. 
It builds the interface subcommands", "call_params and call_params[0] == 'self': call_params = call_params[1:3] # Ensure there are two", "__call__(self, username, password, *args, **kwargs): success = False # Do authentication and update", "for the module that'll be shown in logging name = None # Brief", "subparsers that will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module", "receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module to iterate over", "greater params to be received assert len(call_params) == 2,('__call__ must receive at '", "== 'self': call_params = call_params[1:3] # Ensure there are two or greater params", "Do authentication and update success to True if successful if success: return dict(outcome=1,username=username,password=password)", "all of the necessary argument # to initialize the brute force module. dct", "= cont.get('additional') if not name: raise ValueError( 'contributor records must have a \"name\"", "be callable. ' 'Declare a __call__ method on the module: ' \\ f'{cls.get_handle}')", "name = None # Brief description to display in the help menu brief_description", "the argparse arguments to a dictionary args = vars(args) # Initialize a dictionary", "signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if k == 'self':", "other wise else: dest[k]=v return dest class Module: '''# Base Module Class This", "success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned in the", "ValueError( 'contributor records must have a \"name\" field') epilog += f'\\n- {name}' if", "username, password') # Ensure that the first two are 'username' and 'password' assert", "interface via argparse. 
''' epilog = None if hasattr(cls, 'contributors'): # ========================== #", "It builds the interface subcommands by inspecting the __init__ method while also enforcing", "set static values supporting a brute force module. It's useful in situations when", "values from src set in dest. ''' dest = {} # Iterate over", "THE __call__ METHOD # ============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules must", "isinstance(additional, dict): raise ValueError( 'additional field of contributor records ' f'must be a", "dictionary with argument values from src set in dest. ''' dest = {}", "in inspect.signature(func).parameters.items(): # Skip \"self\" references if k == 'self': continue # Extract", "# ====================== # BUILD MODULE ARGUMENTS # ====================== '''Here we create a new", "# BUILD MODULE ARGUMENTS # ====================== '''Here we create a new argparse argument", "argparse import inspect import re def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func:", "' f'must be a dict, not {type(additional)}') for k,v in additional.items(): epilog +=", "-> argparse.ArgumentParser: '''Use the inspect module to iterate over each parameter declared in", "BUILD MODULE ARGUMENTS # ====================== '''Here we create a new argparse argument parser", "force module. ''' print('Initializing module!') # Translate the argparse arguments to a dictionary", "of the module that'll be shown in the interface description = None @classmethod", "declared in __init__ and build an interface via argparse. ''' epilog = None", "for the command assoicated with the newly created module. 
This is how we", "be shown in the interface description = None @classmethod def initialize(cls, args): '''Initialize", "must be dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional = cont.get('additional') if", "for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if k == 'self': continue", "len(call_params) == 2,('__call__ must receive at ' \\ 'least two arguments: username, password')", "called for each authentication attempt by BruteLoops and should check the validity of", "Translate the argparse arguments to a dictionary args = vars(args) # Initialize a", "when provided if k in src: dest[k]=src[k] # Use the default value other", "identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will receive", "in src: dest[k]=src[k] # Use the default value other wise else: dest[k]=v return", "dict): raise ValueError( 'contributor records must be dictionaries, ' f'not {type(cont)}') name =", "the interface subcommands by inspecting the __init__ method while also enforcing restrictions on", "f'\\nReferences:\\n' references = cls.references if not isinstance(references, list): raise ValueError( f'References must be", "0 means failure. ''' # Name for the module that'll be shown in", "at the commandline, along with providing a mechanism to assign values to module", "structure returned in the declaration above. The leading integer value determines if authentication", "epilog += f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS # ====================== '''Here", "password, *args, **kwargs): success = False # Do authentication and update success to", "iterate over each parameter declared in __init__ and build an interface via argparse.", "initialize(cls, args): '''Initialize and return the underlying brute force module. 
''' print('Initializing module!')", "attempt by BruteLoops and should check the validity of a username and password.", "targeted. # The __call__ Method This method is called for each authentication attempt", "and values in the function's # signature for k,v in inspect.signature(func).parameters.items(): # Skip", "getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__ method on the module: '", "description to display in the help menu brief_description = None # Description of", "as a template for brute force modules. It builds the interface subcommands by", "True if successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ``` Note the", "success to True if successful if success: return dict(outcome=1,username=username,password=password) else: return dict(outcome=0,username=username,password=password, ```", "ARGUMENTS # ====================== '''Here we create a new argparse argument parser for the", ".keys()) if call_params and call_params[0] == 'self': call_params = call_params[1:3] # Ensure there", "__call__ method on the module: ' \\ f'{cls.get_handle}') # Get a list of", "import re def bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method from which", "f'must be a dict, not {type(additional)}') for k,v in additional.items(): epilog += f'\\n", "got {type(references)}') for ref in references: epilog += f'\\n- {ref}' # ====================== #", "dictionary ' f'values, not {type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont, dict):", "the module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return", "via argparse. 
''' epilog = None if hasattr(cls, 'contributors'): # ========================== # FORMAT", "we bind the name that the user will refernce at the commandline, along", "determines if authentication was successful, indicating valid credentials: 1 means success, 0 means", "received assert len(call_params) == 2,('__call__ must receive at ' \\ 'least two arguments:", "'''# Base Module Class This class serves as a template for brute force", "arguments: username, password') # Ensure that the first two are 'username' and 'password'", "username, password, *args, **kwargs): success = False # Do authentication and update success", "\\ -> argparse.ArgumentParser: '''Use the inspect module to iterate over each parameter declared", "useful in situations when an upstream server needs to be targeted. # The", "Method This method is called for each authentication attempt by BruteLoops and should", "not name: raise ValueError( 'contributor records must have a \"name\" field') epilog +=", "that'll be shown in the interface description = None @classmethod def initialize(cls, args):", "will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module to iterate", "Ensure that the first two are 'username' and 'password' assert ['username','password'] == call_params,('__call__", "subcommands by inspecting the __init__ method while also enforcing restrictions on the __call__", "'''Initialize and return the underlying brute force module. ''' print('Initializing module!') # Translate", "argument parser for the command assoicated with the newly created module. This is", "values supporting a brute force module. It's useful in situations when an upstream", "'least two arguments: username, password') # Ensure that the first two are 'username'", "__init__ method while also enforcing restrictions on the __call__ method to ensure BruteLoops", "string to use as a module identifier. 
''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls,", "successful, indicating valid credentials: 1 means success, 0 means failure. ''' # Name", "raise ValueError( 'Module contributors must be a list of dictionary ' f'values, not", "a list, got {type(references)}') for ref in references: epilog += f'\\n- {ref}' #", "src: dest[k]=src[k] # Use the default value other wise else: dest[k]=v return dest", "'.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will receive the subcommand') \\", "# signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if k ==", "new dictionary with argument values from src set in dest. ''' dest =", "method can be used to set static values supporting a brute force module.", "means success, 0 means failure. ''' # Name for the module that'll be", "make authentication callbacks. # The __init__ Method This method can be used to", "dictionary to hold all of the necessary argument # to initialize the brute", "of dictionary ' f'values, not {type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont,", "{name}' if additional: if not isinstance(additional, dict): raise ValueError( 'additional field of contributor", "= vars(args) # Initialize a dictionary to hold all of the necessary argument", "if hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES # ======================== epilog +=", "be targeted. # The __call__ Method This method is called for each authentication", "module. 
This is how we bind the name that the user will refernce", "look like: ``` def __call__(self, username, password, *args, **kwargs): success = False #", "f'{cls.get_handle}') # Get a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys())", "contributor records ' f'must be a dict, not {type(additional)}') for k,v in additional.items():", "# ======================== # FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references =", "with providing a mechanism to assign values to module parameters. ''' parser =", "BruteLoops and should check the validity of a username and password. The method", "'contributor records must have a \"name\" field') epilog += f'\\n- {name}' if additional:", "'contributor records must be dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional =", "''' epilog = None if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS", "we create a new argparse argument parser for the command assoicated with the", "1 means success, 0 means failure. ''' # Name for the module that'll", "means failure. ''' # Name for the module that'll be shown in logging", "# ============================== # VALIDATING THE __call__ METHOD # ============================== # Ensure that it's", "@classmethod def validate(cls): # ============================== # VALIDATING THE __call__ METHOD # ============================== #", "static values supporting a brute force module. 
It's useful in situations when an", "description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True, help='Database to target.') return", "signature must look like: ``` def __call__(self, username, password, *args, **kwargs): success =", "have a \"name\" field') epilog += f'\\n- {name}' if additional: if not isinstance(additional,", "list): raise ValueError( 'Module contributors must be a list of dictionary ' f'values,", "name that the user will refernce at the commandline, along with providing a", "'additional field of contributor records ' f'must be a dict, not {type(additional)}') for", "{type(cont)}') name = cont.get('name') additional = cont.get('additional') if not name: raise ValueError( 'contributor", "the necessary argument # to initialize the brute force module. dct = bindSignatureArgs(func=cls.__init__,", "the command assoicated with the newly created module. 
This is how we bind", "and return the module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__,", "default value other wise else: dest[k]=v return dest class Module: '''# Base Module", "MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references = cls.references if not isinstance(references,", "hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): # ==============================", "# Translate the argparse arguments to a dictionary args = vars(args) # Initialize", "raise ValueError( 'contributor records must be dictionaries, ' f'not {type(cont)}') name = cont.get('name')", "assert len(call_params) == 2,('__call__ must receive at ' \\ 'least two arguments: username,", "enforcing restrictions on the __call__ method to ensure BruteLoops can make authentication callbacks.", "epilog += '\\n' if hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES #", "in situations when an upstream server needs to be targeted. # The __call__", "force module. dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module instance", "module to iterate over each parameter declared in __init__ and build an interface", "validate(cls): # ============================== # VALIDATING THE __call__ METHOD # ============================== # Ensure that", "brute force module. 
''' print('Initializing module!') # Translate the argparse arguments to a", "cont in cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor records must be", "epilog += f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls, 'references'): # ========================", "a new argparse argument parser for the command assoicated with the newly created", "dest[k]=src[k] # Use the default value other wise else: dest[k]=v return dest class", "the user supplied value when provided if k in src: dest[k]=src[k] # Use", "= subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True, help='Database to", "the declaration above. The leading integer value determines if authentication was successful, indicating", "to assign values to module parameters. ''' parser = subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args,", "method to ensure BruteLoops can make authentication callbacks. # The __init__ Method This", "the brute force module. dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the", "'must receive the first two arguments as username, ' \\ f'password -- not:", "if additional: if not isinstance(additional, dict): raise ValueError( 'additional field of contributor records", "''' # Name for the module that'll be shown in logging name =", "\"name\" field') epilog += f'\\n- {name}' if additional: if not isinstance(additional, dict): raise", "k,v in additional.items(): epilog += f'\\n {k}: {v}' epilog += '\\n' if hasattr(cls,", "brute force module. 
dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and return the module", "cls.references if not isinstance(references, list): raise ValueError( f'References must be a list, got", "the __init__ method while also enforcing restrictions on the __call__ method to ensure", "============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare", "and build an interface via argparse. ''' epilog = None if hasattr(cls, 'contributors'):", "get_handle(cls): '''Return a simple string to use as a module identifier. ''' return", "else: return dict(outcome=0,username=username,password=password, ``` Note the structure returned in the declaration above. The", "validity of a username and password. The method signature must look like: ```", "not isinstance(cont, dict): raise ValueError( 'contributor records must be dictionaries, ' f'not {type(cont)}')", "brute force modules. It builds the interface subcommands by inspecting the __init__ method", "' f'values, not {type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont, dict): raise", "# Get a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if", "Source dictionary that will provide values for dest. Returns: A new dictionary with", "needs to be targeted. # The __call__ Method This method is called for", "src=args)) return instance @classmethod def validate(cls): # ============================== # VALIDATING THE __call__ METHOD", "simple string to use as a module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def", "supporting a brute force module. It's useful in situations when an upstream server", "dict: ''' Args: func: Function/method from which the signature will be sourced. 
src:", "if k == 'self': continue # Extract the user supplied value when provided", "# VALIDATING THE __call__ METHOD # ============================== # Ensure that it's declared assert", "call_params[0] == 'self': call_params = call_params[1:3] # Ensure there are two or greater", "+= f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS # ====================== '''Here we", "Module: '''# Base Module Class This class serves as a template for brute", "force module. It's useful in situations when an upstream server needs to be", "not {type(cls.contributors)}') for cont in cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor", "call_params[1:3] # Ensure there are two or greater params to be received assert", "that will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module to", "Class This class serves as a template for brute force modules. It builds", "additional = cont.get('additional') if not name: raise ValueError( 'contributor records must have a", "method on the module: ' \\ f'{cls.get_handle}') # Get a list of parameter", "\\ f'{cls.get_handle}') # Get a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\", "two arguments: username, password') # Ensure that the first two are 'username' and", "raise ValueError( 'additional field of contributor records ' f'must be a dict, not", "epilog += f'\\nReferences:\\n' references = cls.references if not isinstance(references, list): raise ValueError( f'References", "Function/method from which the signature will be sourced. src: Source dictionary that will", "with argument values from src set in dest. ''' dest = {} #", "function's # signature for k,v in inspect.signature(func).parameters.items(): # Skip \"self\" references if k", "success, 0 means failure. 
''' # Name for the module that'll be shown", "names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] == 'self': call_params", "module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers that will", "'\\n' if hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES # ======================== epilog", "Args: func: Function/method from which the signature will be sourced. src: Source dictionary", "Base Module Class This class serves as a template for brute force modules.", "credentials: 1 means success, 0 means failure. ''' # Name for the module", "two or greater params to be received assert len(call_params) == 2,('__call__ must receive", "'self': continue # Extract the user supplied value when provided if k in", "BruteLoops can make authentication callbacks. # The __init__ Method This method can be", "created module. This is how we bind the name that the user will", "situations when an upstream server needs to be targeted. # The __call__ Method", "for dest. Returns: A new dictionary with argument values from src set in", "Brief description to display in the help menu brief_description = None # Description", "instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod", "' \\ f'{cls.get_handle}') # Get a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters", "must be a list, got {type(references)}') for ref in references: epilog += f'\\n-", "subparsers: 'Argparse subparsers that will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use the", "' \\ f'password -- not: {call_params}') @classmethod def get_handle(cls): '''Return a simple string", "# to initialize the brute force module. 
dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize", "CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module", "'''Here we create a new argparse argument parser for the command assoicated with", "password') # Ensure that the first two are 'username' and 'password' assert ['username','password']", "should check the validity of a username and password. The method signature must", "a simple string to use as a module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod", "instance @classmethod def validate(cls): # ============================== # VALIDATING THE __call__ METHOD # ==============================", "and password. The method signature must look like: ``` def __call__(self, username, password,", "{v}' epilog += '\\n' if hasattr(cls, 'references'): # ======================== # FORMAT MODULE REFERENCES", "brute force module. It's useful in situations when an upstream server needs to", "module. ''' print('Initializing module!') # Translate the argparse arguments to a dictionary args", "Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be callable. ' 'Declare a __call__", "user supplied value when provided if k in src: dest[k]=src[k] # Use the", "Extract the user supplied value when provided if k in src: dest[k]=src[k] #", "ValueError( 'contributor records must be dictionaries, ' f'not {type(cont)}') name = cont.get('name') additional", "a \"name\" field') epilog += f'\\n- {name}' if additional: if not isinstance(additional, dict):", "first two are 'username' and 'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must", "args): '''Initialize and return the underlying brute force module. 
''' print('Initializing module!') #", "the module: ' \\ f'{cls.get_handle}') # Get a list of parameter names call_params", "None # Description of the module that'll be shown in the interface description", "**bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): # ============================== # VALIDATING THE", "# ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors", "the signature will be sourced. src: Source dictionary that will provide values for", "# ======================== epilog += f'\\nReferences:\\n' references = cls.references if not isinstance(references, list): raise", "by BruteLoops and should check the validity of a username and password. The", "the user will refernce at the commandline, along with providing a mechanism to", "Iterate over paramaters and values in the function's # signature for k,v in", "Initialize and return the module instance = cls(**dct) if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs(", "template for brute force modules. It builds the interface subcommands by inspecting the", "can make authentication callbacks. # The __init__ Method This method can be used", "contributors must be a list of dictionary ' f'values, not {type(cls.contributors)}') for cont", "Get a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params", "must be callable. 
' 'Declare a __call__ method on the module: ' \\", "bindSignatureArgs(func, src:dict) -> dict: ''' Args: func: Function/method from which the signature will", "= None @classmethod def initialize(cls, args): '''Initialize and return the underlying brute force", "leading integer value determines if authentication was successful, indicating valid credentials: 1 means", "in cls.contributors: if not isinstance(cont, dict): raise ValueError( 'contributor records must be dictionaries,", "the first two arguments as username, ' \\ f'password -- not: {call_params}') @classmethod", "values for dest. Returns: A new dictionary with argument values from src set", "modules. It builds the interface subcommands by inspecting the __init__ method while also", "a dictionary to hold all of the necessary argument # to initialize the", "field') epilog += f'\\n- {name}' if additional: if not isinstance(additional, dict): raise ValueError(", "name: raise ValueError( 'contributor records must have a \"name\" field') epilog += f'\\n-", "parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] == 'self':", "indicating valid credentials: 1 means success, 0 means failure. ''' # Name for", "-> dict: ''' Args: func: Function/method from which the signature will be sourced.", "the subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module to iterate over each", "f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS # ====================== '''Here we create", "instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): # ============================== # VALIDATING", "to initialize the brute force module. 
dct = bindSignatureArgs(func=cls.__init__, src=args) # Initialize and", "if not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be a list of", "====================== '''Here we create a new argparse argument parser for the command assoicated", "shown in logging name = None # Brief description to display in the", "the interface description = None @classmethod def initialize(cls, args): '''Initialize and return the", "FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if not isinstance(cls.contributors, list): raise", "dest = {} # Iterate over paramaters and values in the function's #", "# The __call__ Method This method is called for each authentication attempt by", "__call__ Method This method is called for each authentication attempt by BruteLoops and", "-- not: {call_params}') @classmethod def get_handle(cls): '''Return a simple string to use as", "a list of parameter names call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and", "returned in the declaration above. The leading integer value determines if authentication was", "func: Function/method from which the signature will be sourced. src: Source dictionary that", "not isinstance(cls.contributors, list): raise ValueError( 'Module contributors must be a list of dictionary", "each authentication attempt by BruteLoops and should check the validity of a username", "of contributor records ' f'must be a dict, not {type(additional)}') for k,v in", "a dict, not {type(additional)}') for k,v in additional.items(): epilog += f'\\n {k}: {v}'", "# FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references = cls.references if", "**kwargs): success = False # Do authentication and update success to True if", "wise else: dest[k]=v return dest class Module: '''# Base Module Class This class", "dest. 
Returns: A new dictionary with argument values from src set in dest.", "for each authentication attempt by BruteLoops and should check the validity of a", "======================== # FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references = cls.references", "False # Do authentication and update success to True if successful if success:", "__call__ METHOD # ============================== # Ensure that it's declared assert getattr(cls,'__call__'),('Modules must be", "'''Return a simple string to use as a module identifier. ''' return '.'.join(cls.__module__.split('.')[-3:][:2])", "in references: epilog += f'\\n- {ref}' # ====================== # BUILD MODULE ARGUMENTS #", "refernce at the commandline, along with providing a mechanism to assign values to", "not isinstance(references, list): raise ValueError( f'References must be a list, got {type(references)}') for", "and 'password' assert ['username','password'] == call_params,('__call__ ' \\ 'must receive the first two", "'''Use the inspect module to iterate over each parameter declared in __init__ and", "return the underlying brute force module. ''' print('Initializing module!') # Translate the argparse", "# Name for the module that'll be shown in logging name = None", "Skip \"self\" references if k == 'self': continue # Extract the user supplied", "declaration above. The leading integer value determines if authentication was successful, indicating valid", "display in the help menu brief_description = None # Description of the module", "build_interface(cls, subparsers: 'Argparse subparsers that will receive the subcommand') \\ -> argparse.ArgumentParser: '''Use", "The method signature must look like: ``` def __call__(self, username, password, *args, **kwargs):", "as a module identifier. 
''' return '.'.join(cls.__module__.split('.')[-3:][:2]) @classmethod def build_interface(cls, subparsers: 'Argparse subparsers", "inspect module to iterate over each parameter declared in __init__ and build an", "a template for brute force modules. It builds the interface subcommands by inspecting", "for ref in references: epilog += f'\\n- {ref}' # ====================== # BUILD MODULE", "subcommand') \\ -> argparse.ArgumentParser: '''Use the inspect module to iterate over each parameter", "dict, not {type(additional)}') for k,v in additional.items(): epilog += f'\\n {k}: {v}' epilog", "a username and password. The method signature must look like: ``` def __call__(self,", "None if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog", "'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n' if", "if hasattr(instance, '__post_init__'): instance.__post_init__( **bindSignatureArgs( func=instance.__post_init__, src=args)) return instance @classmethod def validate(cls): #", "call_params = list(inspect.signature(cls.__call__).parameters \\ .keys()) if call_params and call_params[0] == 'self': call_params =", "new argparse argument parser for the command assoicated with the newly created module.", "= {} # Iterate over paramaters and values in the function's # signature", "'Declare a __call__ method on the module: ' \\ f'{cls.get_handle}') # Get a", "used to set static values supporting a brute force module. It's useful in", "additional: if not isinstance(additional, dict): raise ValueError( 'additional field of contributor records '", "was successful, indicating valid credentials: 1 means success, 0 means failure. 
''' #", "FORMAT MODULE REFERENCES # ======================== epilog += f'\\nReferences:\\n' references = cls.references if not", "field of contributor records ' f'must be a dict, not {type(additional)}') for k,v", "subparsers.add_parser(cls.get_handle(), description=cls.description, help=cls.brief_description, parents=cls.args, formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog) parser.set_defaults(module=cls) parser.add_argument('--database', '-db', required=True, help='Database to target.')", "= None if hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS # ==========================", "hasattr(cls, 'contributors'): # ========================== # FORMAT MODULE CONTRIBUTORS # ========================== epilog = 'Contributors:\\n\\n'" ]
[ "\"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode != 0:", "stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR exited with code %d\"", "any code except 0, then there was some error if p.returncode != 0:", "args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init()", "stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with code %d\" %", "def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename],", "raise Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\")", "stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN", "'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script to", "there was some error if p.returncode != 0: error(\"RMAN execution failed with code", "p.returncode != 0: error(\"SQL*Plus exited with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus", "If RMAN exists with any code except 0, then there was some error", "%s\" % self.oraclehome) def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID']", "!= 0: error(\"SQLLDR exited with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited", "self.oraclehome) def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid", "tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath = None oraclesid", "= None def __init__(self, oraclehome, tnspath, sid=None): 
self.oraclehome = oraclehome self.tnspath = tnspath", "if silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\")", "'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode != 0: raise", "Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with", "debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as f: args", "code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\" % p.returncode)", "p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode) else: debug(\"SQLLDR execution", "silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE)", "% self.oraclehome) def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if", "successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome,", "= sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid is None", "exited with code %d\" % p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript,", "%d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode) else:", "Exception('rman', \"RMAN exited with code %d\" % p.returncode) else: debug(\"RMAN execution successful\") def", "code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode)", "error if p.returncode != 0: error(\"RMAN execution failed with code %d\" % p.returncode)", "= Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, 
stderr=f, stdin=PIPE) # Send the", "script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" %", "from subprocess import Popen, PIPE from backupcommon import BackupLogger, info, debug, error, exception", "stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with code", "tnspath if sid is not None: self.oraclesid = sid debug(\"Oracle home: %s\" %", "finalscript, silent=False): self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if", "os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT']", "f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR exited", "if p.returncode != 0: error(\"SQLLDR exited with code %d\" % p.returncode) raise Exception('sqlldr',", "os, sys from subprocess import Popen, PIPE from backupcommon import BackupLogger, info, debug,", "sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0],", "oraclehome = None tnspath = None oraclesid = None def __init__(self, oraclehome, tnspath,", "\"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript) endtime =", "= None oraclesid = None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome", "Popen, PIPE from backupcommon import BackupLogger, info, debug, error, exception from datetime import", "self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID']", "self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f: p", "oraclehome 
self.tnspath = tnspath if sid is not None: self.oraclesid = sid debug(\"Oracle", "[os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p =", "self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now() with", "outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None)", "inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode != 0: raise Exception('adrci','Exit code was", "\"RMAN exited with code %d\" % p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self,", "0, then there was some error if p.returncode != 0: error(\"RMAN execution failed", "PIPE from backupcommon import BackupLogger, info, debug, error, exception from datetime import datetime,", "oraclesid = None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath =", "with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\" %", "\"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript)", "was some error if p.returncode != 0: error(\"RMAN execution failed with code %d\"", "exited with code %d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0)", "= os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f:", "'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script to RMAN", "with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f,", "execution starts\") f1 = 
mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 =", "then there was some error if p.returncode != 0: error(\"RMAN execution failed with", "debug(\"RMAN execution time %s\" % (endtime-starttime)) # If RMAN exists with any code", "p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as", "%d\" % p.returncode) raise Exception('rman', \"RMAN exited with code %d\" % p.returncode) else:", "= oraclehome self.tnspath = tnspath if sid is not None: self.oraclesid = sid", "p.returncode) raise Exception('rman', \"RMAN exited with code %d\" % p.returncode) else: debug(\"RMAN execution", "import Popen, PIPE from backupcommon import BackupLogger, info, debug, error, exception from datetime", "p.returncode != 0: error(\"SQLLDR exited with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr", "= 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\")", "sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if sid is not None: self.oraclesid", "stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with code %d\"", "home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del", "\"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode != 0: raise Exception('adrci','Exit", "% p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile()", "import datetime, timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None", "= tnspath if sid is not None: self.oraclesid = sid debug(\"Oracle home: %s\"", "else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, 
silent=False): self._setenv() with TemporaryFile() as f:", "os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p", "p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus", "error(\"SQL*Plus exited with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with code", "p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with code %d\" % p.returncode)", "execution starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome,", "else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript):", "% p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\" % p.returncode) else: debug(\"SQL*Plus", "!= 0: error(\"SQL*Plus exited with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited", "finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript)", "del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome", "BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) # If RMAN exists with any", "BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'),", "datetime, timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath", "= Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"],", "raise Exception('sqlplus', \"sqlplus exited with code %d\" % p.returncode) else: debug(\"SQL*Plus execution 
successful\")", "stdout=f, stderr=f, stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript) endtime = datetime.now()", "% p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self,", "TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus", "datetime import datetime, timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome =", "Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1])", "with code %d\" % p.returncode) raise Exception('rman', \"RMAN exited with code %d\" %", "raise Exception('rman', \"RMAN exited with code %d\" % p.returncode) else: debug(\"RMAN execution successful\")", "hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime", "rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as", "code %d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read()", "stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR exited with code %d\" % p.returncode)", "0: error(\"RMAN execution failed with code %d\" % p.returncode) raise Exception('rman', \"RMAN exited", "'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None)", "backupcommon import BackupLogger, info, debug, error, exception from datetime import datetime, timedelta from", "= self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now()", "def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = 
datetime.now() with TemporaryFile()", "if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f,", "debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2", "stderr=f, stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init()", "starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin',", "= mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with", "!= 0: error(\"RMAN execution failed with code %d\" % p.returncode) raise Exception('rman', \"RMAN", "adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle,", "exited with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\"", "code %d\" % p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv()", "self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self,", "stderr=None, stdin=None) p.wait() if p.returncode != 0: raise Exception('adrci','Exit code was not 0.')", "def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp =", "oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if sid is not", "exists with any code except 0, then there was some error if p.returncode", "None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid", "= 
datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile,", "\"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p =", "# If RMAN exists with any code except 0, then there was some", "debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f: p =", "exited with code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def", "None tnspath = None oraclesid = None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome", "with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1],", "datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"],", "BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited with code %d\" % p.returncode) raise", "if p.returncode != 0: error(\"RMAN execution failed with code %d\" % p.returncode) raise", "from datetime import datetime, timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome", "subprocess import Popen, PIPE from backupcommon import BackupLogger, info, debug, error, exception from", "self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close()", "stderr=None, stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR exited with code %d\" %", "% inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode != 0: raise Exception('adrci','Exit code", "debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if", "some error if p.returncode != 0: error(\"RMAN execution failed with code 
%d\" %", "%d\" % p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with", "(endtime-starttime)) # If RMAN exists with any code except 0, then there was", "os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath", "RMAN exists with any code except 0, then there was some error if", "not None: self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if", "% p.returncode) raise Exception('rman', \"RMAN exited with code %d\" % p.returncode) else: debug(\"RMAN", "execution successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR", "execution time %s\" % (endtime-starttime)) # If RMAN exists with any code except", "p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self, login,", "debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv()", "exited with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\"", "f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) #", "timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath =", "with code %d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return", "self.oraclehome = oraclehome self.tnspath = tnspath if sid is not None: self.oraclesid =", "os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" %", "except 0, then there was some error if p.returncode != 0: error(\"RMAN execution", "stdout=outputfilehandle, 
stderr=None, stdin=None) p.wait() if p.returncode != 0: raise Exception('adrci','Exit code was not", "sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')]", "args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close()", "silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1", "if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None:", "= self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def", "starttime = datetime.now() with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\",", "RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) #", "'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args,", "with any code except 0, then there was some error if p.returncode !=", "import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath = None oraclesid =", "with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog')", "sid is not None: self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome) def", "= self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv()", "\"sqlldr exited with code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1])", "\"silent=all\"], stdout=f, stderr=None, stdin=None) 
p.communicate() if p.returncode != 0: error(\"SQLLDR exited with code", "os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime =", "args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript)", "p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if", "Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode !=", "% p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv()", "successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'),", "as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE)", "TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f,", "as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" %", "error, exception from datetime import datetime, timedelta from tempfile import mkstemp, TemporaryFile class", "os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript):", "ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin',", "Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], 
\"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f,", "p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\" % p.returncode) else: debug(\"SQL*Plus execution", "OracleExec(object): oraclehome = None tnspath = None oraclesid = None def __init__(self, oraclehome,", "import os, sys from subprocess import Popen, PIPE from backupcommon import BackupLogger, info,", "BackupLogger, info, debug, error, exception from datetime import datetime, timedelta from tempfile import", "self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S')", "os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution", "%d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle):", "self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN", "endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) # If RMAN", "None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if", "tnspath, sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if sid is not None:", "exception from datetime import datetime, timedelta from tempfile import mkstemp, TemporaryFile class OracleExec(object):", "'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p = Popen(args, stdout=f,", "self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait()", "p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" % 
f2[1], \"errors=0\",", "silent=False): self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent:", "from tempfile import mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath = None", "= datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) # If RMAN exists", "mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile()", "code %d\" % p.returncode) raise Exception('rman', \"RMAN exited with code %d\" % p.returncode)", "None oraclesid = None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath", "self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid is", "f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0])", "sys from subprocess import Popen, PIPE from backupcommon import BackupLogger, info, debug, error,", "os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" %", "os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\"", "tnspath = None oraclesid = None def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome =", "time %s\" % (endtime-starttime)) # If RMAN exists with any code except 0,", "starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode !=", "to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime))", "p.returncode != 0: error(\"RMAN execution failed with code %d\" % p.returncode) raise 
Exception('rman',", "execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin',", "f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp", "= [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\") BackupLogger.close() p", "self.tnspath = tnspath if sid is not None: self.oraclesid = sid debug(\"Oracle home:", "login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\")", "p.communicate() if p.returncode != 0: error(\"SQLLDR exited with code %d\" % p.returncode) raise", "self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd", "with code %d\" % p.returncode) else: debug(\"RMAN execution successful\") def sqlplus(self, finalscript, silent=False):", "mkstemp, TemporaryFile class OracleExec(object): oraclehome = None tnspath = None oraclesid = None", "successful\") if silent: f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution", "f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode !=", "debug, error, exception from datetime import datetime, timedelta from tempfile import mkstemp, TemporaryFile", "datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) # If RMAN exists with", "__init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if sid is", "execution failed with code %d\" % p.returncode) raise Exception('rman', \"RMAN exited with code", "%d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with code %d\" % 
p.returncode) else:", "as f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution", "%d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if silent: f.seek(0,0) return f.read() def", "None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] =", "the script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\"", "= mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login,", "from backupcommon import BackupLogger, info, debug, error, exception from datetime import datetime, timedelta", "_setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not", "'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None,", "= Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0: error(\"SQL*Plus exited", "% f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode", "f.seek(0,0) return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 =", "p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p", "sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid is None and", "f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\" % f2[1],", "execution successful\") def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as 
f: args =", "ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome,", "is not None: self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self):", "finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close() starttime = datetime.now() with TemporaryFile() as f:", "is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] =", "def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is", "with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\" %", "login, \"control=%s\" % f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate()", "class OracleExec(object): oraclehome = None tnspath = None oraclesid = None def __init__(self,", "def __init__(self, oraclehome, tnspath, sid=None): self.oraclehome = oraclehome self.tnspath = tnspath if sid", "error(\"RMAN execution failed with code %d\" % p.returncode) raise Exception('rman', \"RMAN exited with", "Exception('sqlplus', \"sqlplus exited with code %d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if", "mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\"", "return f.read() def sqlldr(self, login, finalscript): self._setenv() debug(\"SQLLDR execution starts\") f1 = mkstemp(suffix=\".ctl\")", "inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None,", "= None tnspath = None oraclesid = None def __init__(self, oraclehome, tnspath, sid=None):", "import BackupLogger, info, debug, error, exception from datetime import datetime, 
timedelta from tempfile", "\"sqlplus exited with code %d\" % p.returncode) else: debug(\"SQL*Plus execution successful\") if silent:", "Send the script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time", "if p.returncode != 0: error(\"SQL*Plus exited with code %d\" % p.returncode) raise Exception('sqlplus',", "with code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self,", "code %d\" % p.returncode) else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename,", "% p.returncode) raise Exception('sqlldr', \"sqlldr exited with code %d\" % p.returncode) else: debug(\"SQLLDR", "= Popen([os.path.join(self.oraclehome, 'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode", "0: error(\"SQLLDR exited with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with", "None: self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid", "'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN'] = self.tnspath def rman(self, finalscript): self._setenv() debug(\"RMAN execution starts\") BackupLogger.close()", "f: args = [os.path.join(self.oraclehome, 'bin', 'sqlplus')] if silent: args.append('-S') args.append('/nolog') debug(\"SQL*Plus execution starts\")", "is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss'", "and os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME']", "% f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR", "p = Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", 
BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send", "error(\"SQLLDR exited with code %d\" % p.returncode) raise Exception('sqlldr', \"sqlldr exited with code", "debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p = Popen([os.path.join(self.oraclehome,", "else: debug(\"SQLLDR execution successful\") os.unlink(f1[1]) os.unlink(f2[1]) def adrci(self, inputscriptfilename, outputfilehandle): self._setenv() p =", "code except 0, then there was some error if p.returncode != 0: error(\"RMAN", "\"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if p.returncode != 0: error(\"SQLLDR exited with", "starts\") f1 = mkstemp(suffix=\".ctl\") ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\")", "%s\" % (endtime-starttime)) # If RMAN exists with any code except 0, then", "Popen([os.path.join(self.oraclehome, 'bin', 'rman'), \"log\", BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script", "'bin', 'adrci'), \"script=%s\" % inputscriptfilename], stdout=outputfilehandle, stderr=None, stdin=None) p.wait() if p.returncode != 0:", "def sqlplus(self, finalscript, silent=False): self._setenv() with TemporaryFile() as f: args = [os.path.join(self.oraclehome, 'bin',", "f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'),", "debug(\"Oracle home: %s\" % self.oraclehome) def _setenv(self): if self.oraclesid is None and os.environ.get('ORACLE_SID'):", "failed with code %d\" % p.returncode) raise Exception('rman', \"RMAN exited with code %d\"", "ftmp = os.fdopen(f1[0], \"w\") ftmp.write(finalscript) ftmp.close() f2 = mkstemp(suffix=\".log\") os.close(f2[0]) with TemporaryFile() as", "TemporaryFile class OracleExec(object): oraclehome = None tnspath = None oraclesid = None def", 
"os.environ.get('ORACLE_SID'): del os.environ['ORACLE_SID'] if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] =", "if sid is not None: self.oraclesid = sid debug(\"Oracle home: %s\" % self.oraclehome)", "info, debug, error, exception from datetime import datetime, timedelta from tempfile import mkstemp,", "BackupLogger.logfile, \"append\"], stdout=f, stderr=f, stdin=PIPE) # Send the script to RMAN p.communicate(input=finalscript) endtime", "% (endtime-starttime)) # If RMAN exists with any code except 0, then there", "p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution time %s\" % (endtime-starttime)) # If", "TemporaryFile() as f: p = Popen([os.path.join(self.oraclehome, 'bin', 'sqlldr'), login, \"control=%s\" % f1[1], \"log=%s\"", "\"control=%s\" % f1[1], \"log=%s\" % f2[1], \"errors=0\", \"silent=all\"], stdout=f, stderr=None, stdin=None) p.communicate() if", "BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode != 0:", "execution starts\") BackupLogger.close() p = Popen(args, stdout=f, stderr=f, stdin=PIPE) p.communicate(input=finalscript) BackupLogger.init() if p.returncode", "0: error(\"SQL*Plus exited with code %d\" % p.returncode) raise Exception('sqlplus', \"sqlplus exited with", "not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] = 'yyyy-mm-dd hh24:mi:ss' os.environ['TNS_ADMIN']", "# Send the script to RMAN p.communicate(input=finalscript) endtime = datetime.now() BackupLogger.init() debug(\"RMAN execution", "if self.oraclesid is not None: os.environ['ORACLE_SID'] = self.oraclesid os.environ['ORACLE_HOME'] = self.oraclehome os.environ['NLS_DATE_FORMAT'] =" ]
[ "4 self.eval_metric = 'auc' self.num_round = 10 self.verbose = 1 self.help = {", "self.max_depth = 2 self.eta = 1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric", "self.eval_metric = 'auc' self.num_round = 10 self.verbose = 1 self.help = { }", "XGB: class Params: def __init__(self): self.max_depth = 2 self.eta = 1 self.objective =", "__init__(self): self.max_depth = 2 self.eta = 1 self.objective = 'binary:logistic' self.nthread = 4", "Params: def __init__(self): self.max_depth = 2 self.eta = 1 self.objective = 'binary:logistic' self.nthread", "= 2 self.eta = 1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric =", "= 1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round =", "1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round = 10", "def __init__(self): self.max_depth = 2 self.eta = 1 self.objective = 'binary:logistic' self.nthread =", "= 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round = 10 self.verbose =", "class Params: def __init__(self): self.max_depth = 2 self.eta = 1 self.objective = 'binary:logistic'", "self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round = 10 self.verbose", "= 4 self.eval_metric = 'auc' self.num_round = 10 self.verbose = 1 self.help =", "'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round = 10 self.verbose = 1", "self.nthread = 4 self.eval_metric = 'auc' self.num_round = 10 self.verbose = 1 self.help", "2 self.eta = 1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc'", "self.eta = 1 self.objective = 'binary:logistic' self.nthread = 4 self.eval_metric = 'auc' self.num_round", "class XGB: class Params: def __init__(self): self.max_depth = 2 self.eta = 1 self.objective" ]
[ "hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer == 0: print('{} is the", "is 'posix') else: _ = system('clear') def create_deck(): deck = [] map =", "if got_wish and not game_over and not player.Computer: player.print_hand() return game_over def request_wish(player,", "players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player]", "0 most_cards = -1 while most_cards == -1: for denom, count in player.HandCounts.items():", "wish? ') if wish.upper() not in card_map.values(): print('You must wish for a valid", "{} while got_wish and not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets,", "new_game or winner == 2: r = randint(1, 100) if r <= 50:", "else: if winner == 0: dealer = 1 else: dealer = 0 return", "= True game_over = False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player", "list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand", "sets1 = len(players[1].Sets) if sets0 > sets1: print('{} wins, {} sets to {}'.format(players[0].Name,", "not game_over and not player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while", "most_cards = -1 while most_cards == -1: for denom, count in player.HandCounts.items(): if", "opp_hand.keys(): print('Fish, fish, you got your wish!') opp_cards = opp_hand.pop(wish) if wish in", "while True: wait = input('Enter an R to return to the game .", "self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope!", "wish) return got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in", "else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand): for suit in", 
"self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish)", "'5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] #suits = [hearts, diamonds,", "players, False, winner) return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C'", "return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards = -1", "card value in your hand.') else: break return value_position def generate_wish(player, opp_sets, card_map):", "dealer. {} goes first. Dealing the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2) return", "else: value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must wish for", "= computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets =", "card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish:", "True: wish = input('What is your wish? 
') if wish.upper() not in card_map.values():", "name self.Computer = computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts = {}", "= ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q',", "False, winner) return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C' *", "1 else: dealer = 0 return dealer def deal_hand(deck, players, new_game=True, winner=None): hands", "= 0 while not game_over: game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer:", "most_cards == -1: player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is", "Go Fish game \"\"\" from random import (shuffle, randint) from time import sleep", "generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards = -1 while most_cards ==", "in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = []", "= play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True: wait = input('Enter a", "wait = input('Enter an R to return to the game . . 
.')", "= card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return", "if winner == 0: dealer = 1 else: dealer = 0 return dealer", "check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0)", "and (denom not in opp_sets) and (denom not in player.Sets): most_cards = denom", "else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish, you got your wish!')", "else: print('Booooo, you didn\\'t get your wish.') sleep(3) return False def lay_set(self): new_set", "dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map def", "wish = input('What is your wish? ') if wish.upper() not in card_map.values(): print('You", ".') if wait.upper() == 'R': clear_screen() break def main(): global card_deck, card_map clear_screen()", "for card in range(10): for hand in range(2): top_card = deck.pop() if top_card[0]", "= player.lay_set() if got_wish and not game_over and not player.Computer: player.print_hand() return game_over", "return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name = '--help' while player_name.upper() ==", "wish.') sleep(3) return False def lay_set(self): new_set = -1 for card, suits in", "else: dealer = 0 return dealer def deal_hand(deck, players, new_game=True, winner=None): hands =", "Is that correct (y/n)? 
'.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s play!')", "+ break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets)", "card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1: print('{} wins,", "and not game_over and not player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values):", "suit in suits: deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner): if", "def deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for card in", "'\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for suit in suits:", "= players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players,", "len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand()", "'r') as rules: rules_text = rules.read() print(rules_text) while True: wait = input('Enter an", "True game_over = False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player ==", "denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes =", "rules_text = rules.read() print(rules_text) while True: wait = input('Enter an R to return", "= input('What is your wish? 
') if wish.upper() not in card_map.values(): print('You must", "for suit in suits: deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner):", "play = False else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!') if __name__", "= False): self.Name = name self.Computer = computer self.new_hand() def new_hand(self): self.Hand =", "break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards =", "') if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen {} for your", "players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner)", "for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit,", "== 'C': break if current_player == 0: current_player = 1 else: current_player =", "players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts =", "in your hand.') else: break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count", "-1 for card, suits in self.Hand.items(): if len(suits) == 4: new_set = card", "= [] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if", "yn = input('You\\'ve chosen {} for your name. Is that correct (y/n)? 
'.format(player_name))", "Create a Go Fish game \"\"\" from random import (shuffle, randint) from time", "0 while not game_over: game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while", "in opp_hand.keys(): print('Fish, fish, you got your wish!') opp_cards = opp_hand.pop(wish) if wish", "= players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish and", "to JFL Go Fish! This is a one-player game against the Computer.') players,", "you got your wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for card", "= ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for suit", ". .'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{} is the dealer.", "player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen {} for your name. Is", "= name self.Computer = computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts =", "player_name = '--help' while player_name.upper() == '--HELP': player_name = input('Please enter your name", "0: opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets =", "player_name = input('Please enter your name (or --help for the rules): ') if", "else: print('Oops! Try again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck()", "Dealing the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def", "0: current_player = 1 else: current_player = 0 return determine_winner(players, card_deck) def play_hand(players,", "print('{} is the dealer. {} goes first. Dealing the hands . . 
.'.format(players[1].Name,", "display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {}", "if wish.upper() not in card_map.values(): print('You must wish for a valid card value", "'Q', 'K'] #suits = [hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660',", "winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players,", "= randint(1, 100) if r <= 50: dealer = 0 else: dealer =", "wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r')", "if wait.upper() == 'R': clear_screen() break def main(): global card_deck, card_map clear_screen() print('Welcome", "= {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards", "yn = input('Would you like to play again (y/n)? ') if yn.upper() !=", "Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish):", "didn\\'t get your wish.') sleep(3) return False def lay_set(self): new_set = -1 for", "self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go fish.') sleep(2) got_wish =", "= -1 for card, suits in self.Hand.items(): if len(suits) == 4: new_set =", "{} sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1 > sets0:", "hands . . 
.'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True):", "= 0 else: dealer = 1 else: if winner == 0: dealer =", "cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left))", "len(suits) == 4: new_set = card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if", "sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1 > sets0: print('{}", "True else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def", "your wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for card in opp_cards:", "= input('Enter a C to continue . . .') if wait.upper() == 'C':", "in card_map.values(): print('You must wish for a valid card value (2-10, J, Q,", "suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner): if new_game or winner ==", "self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets = [] self.Wishes", "for the rules): ') if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen", "= len(players[1].Sets) if sets0 > sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0,", "end=' ') for denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def", "= True else: game_over = player.lay_set() if got_wish and not game_over and not", "wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True", "else: current_player = 0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish", "[] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name),", "else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!') if __name__ == 
'__main__': main()", "False def clear_screen(): # for windows if name == 'nt': _ = system('cls')", "print(card_map[denom] + suit, end=' ') print() if self.Sets != []: self.print_sets() def print_sets(self):", "Player: def __init__(self, name, computer = False): self.Name = name self.Computer = computer", "hand in range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]:", "range(10): for hand in range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1])", "random import (shuffle, randint) from time import sleep from os import system, name", "in self.Hand.items(): if len(suits) == 4: new_set = card if new_set != -1:", "self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand:", "= input('Enter an R to return to the game . . .') if", "card_deck, card_map = start_new_game() play = True while play: winner = play_game(players, dealer,", "start_new_game(): while(True): player_name = '--help' while player_name.upper() == '--HELP': player_name = input('Please enter", "game_over: game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True: wait =", "sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name = '--help' while player_name.upper()", "= [] self.Wishes = [] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])})", "is a one-player game against the Computer.') players, dealer, card_deck, card_map = start_new_game()", "= [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name,", "opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) 
player", "opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go fish.') sleep(2) got_wish", "players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players,", "True: wait = input('Enter a C to continue . . .') if wait.upper()", ". . .') if wait.upper() == 'C': break if current_player == 0: current_player", "#suits = [hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for", "if (count > highest_count) and (denom not in player.Wishes)\\ and (denom not in", "<filename>final_project/main.py<gh_stars>0 \"\"\" pirple/python/final_project/main.py Final Project Create a Go Fish game \"\"\" from random", "for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as", "sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1: print('{} wins, {}", "clear_screen() with open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text) while True: wait", "card_map def display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left = len(card_deck) clear_screen()", "def cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish, fish, you got", "card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! 
Go fish.')", "(shuffle, randint) from time import sleep from os import system, name class Player:", "current_player, card_deck, card_map): got_wish = True game_over = False card_map_keys = list(card_map.keys()) card_map_values", "card_map): got_wish = True game_over = False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values())", "while play: winner = play_game(players, dealer, card_deck) yn = input('Would you like to", "else: current_player = 0 while not game_over: game_over = play_hand(players, current_player, card_deck, card_map)", "current_player == 0: current_player = 1 else: current_player = 0 return determine_winner(players, card_deck)", "card_deck, card_map clear_screen() print('Welcome to JFL Go Fish! This is a one-player game", "= opp_hand.pop(wish) if wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish:", "you didn\\'t get your wish.') sleep(3) return False def lay_set(self): new_set = -1", "top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer =", "== 'Y': print('OK, then let\\'s play!') break else: print('Oops! 
Try again.') players =", "players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map def display_current_status(players, card_deck):", "return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish = True game_over =", "create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map", "opp_sets) and (denom not in player.Sets): most_cards = denom highest_count = count if", "def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name),", "card, suits in self.Hand.items(): if len(suits) == 4: new_set = card if new_set", "len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ')", "players = [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand =", "return dealer, hands[1], hands[0] else: print('{} is the dealer. {} goes first. 
Dealing", "'4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] #suits = [hearts,", "must wish for a valid card value (2-10, J, Q, K, A)') else:", "= card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must wish for a card", "print('{}\\'s sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom] + 's', end='", "{} card_denominations = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10',", "valid card value (2-10, J, Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if", "== set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if", "if new_game or winner == 2: r = randint(1, 100) if r <=", "windows if name == 'nt': _ = system('cls') # for mac and linux(here,", "draw_card[0] == wish: print('Fish, fish, you got your wish!') return True else: print('Booooo,", "current_player == 0: opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand", "return True else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish", "'--HELP': print_rules() yn = input('You\\'ve chosen {} for your name. Is that correct", "turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish and not game_over: if", "= False if dealer == 0: current_player = 1 else: current_player = 0", "determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish = True game_over = False", "end=' ') for denom in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733',", "== 4: new_set = card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand)", "C to continue . . .') if wait.upper() == 'C': break if current_player", "{} for your name. Is that correct (y/n)? 
'.format(player_name)) if yn.upper() == 'Y':", "as rules: rules_text = rules.read() print(rules_text) while True: wait = input('Enter an R", "from time import sleep from os import system, name class Player: def __init__(self,", "card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish = True game_over = False card_map_keys", "rules.read() print(rules_text) while True: wait = input('Enter an R to return to the", "game_over = False if dealer == 0: current_player = 1 else: current_player =", "the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game():", "card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand", "else: return False def clear_screen(): # for windows if name == 'nt': _", "wish = generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish =", "') if yn.upper() != 'Y': play = False else: dealer, card_deck, card_map =", "= '\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print()", "request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over", "wins, {} sets to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s a", "winner == 0: dealer = 1 else: dealer = 0 return dealer def", "= {} self.Sets = [] self.Wishes = [] def hand_counts(self): for denom in", "for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes", "def display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line", "') for denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def cast(self,", 
"linux(here, os.name is 'posix') else: _ = system('clear') def create_deck(): deck = []", "def new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets = [] self.Wishes =", "one-player game against the Computer.') players, dealer, card_deck, card_map = start_new_game() play =", "if self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end='", "= deal_hand(card_deck, players) return players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand()", "player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over = True else: game_over =", "value (2-10, J, Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position not", "players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False,", "print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name), end='", "card_map_values = list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand opp_sets = players[1].Sets", "choose_dealer(new_game, winner) if dealer == 0: print('{} is the dealer. 
{} goes first.", "= players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name))", "card_map): player.check_wishes() highest_count = 0 most_cards = -1 while most_cards == -1: for", "') else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom] +", "map = {} card_denominations = ['A', '2', '3', '4', '5', '6', '7', '8',", "return deck, map def choose_dealer(new_game, winner): if new_game or winner == 2: r", "def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0", "print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def fish(self, card_deck,", "deal_hand(card_deck, players) return players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck,", "False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0: opp_hand =", "in player.Wishes)\\ and (denom not in opp_sets) and (denom not in player.Sets): most_cards", "self.HandCounts = {} self.Sets = [] self.Wishes = [] def hand_counts(self): for denom", "hands.append({}) hands.append({}) for card in range(10): for hand in range(2): top_card = deck.pop()", "0 elif sets1 > sets0: print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0))", "= input('Please enter your name (or --help for the rules): ') if player_name.upper()", "[Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players)", "print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 =", "= card_deck.pop() if draw_card[0] in self.Hand.keys(): 
self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] ==", "25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {}", "your hand.') else: break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count =", "= [] map = {} card_denominations = ['A', '2', '3', '4', '5', '6',", "set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self):", "a Go Fish game \"\"\" from random import (shuffle, randint) from time import", "play_game(players, dealer, card_deck) yn = input('Would you like to play again (y/n)? ')", "print('\\u2733', end=' ') else: print(card_map[denom] + suit, end=' ') print() if self.Sets !=", ".'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{} is the dealer. {}", "play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck,", "a card value in your hand.') else: break return value_position def generate_wish(player, opp_sets,", "denom, count in player.HandCounts.items(): if (count > highest_count) and (denom not in player.Wishes)\\", "print(rules_text) while True: wait = input('Enter an R to return to the game", "Project Create a Go Fish game \"\"\" from random import (shuffle, randint) from", "100) if r <= 50: dealer = 0 else: dealer = 1 else:", "card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand,", "and (denom not in player.Wishes)\\ and (denom not in opp_sets) and (denom not", "you like to play again (y/n)? ') if yn.upper() != 'Y': play =", "clear_screen() print('Welcome to JFL Go Fish! 
This is a one-player game against the", "print('Booooo, you didn\\'t get your wish.') sleep(3) return False def lay_set(self): new_set =", "winner) return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C' * 25", "game_over = player.lay_set() if got_wish and not game_over and not player.Computer: player.print_hand() return", "most_cards == -1: for denom, count in player.HandCounts.items(): if (count > highest_count) and", "def clear_screen(): # for windows if name == 'nt': _ = system('cls') #", "def play_hand(players, current_player, card_deck, card_map): got_wish = True game_over = False card_map_keys =", "'Y': play = False else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!') if", "= False else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!') if __name__ ==", "player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish", "{}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1 > sets0: print('{} wins, {}", "new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets = [] self.Wishes = []", "global card_deck, card_map clear_screen() print('Welcome to JFL Go Fish! This is a one-player", "<= 50: dealer = 0 else: dealer = 1 else: if winner ==", "self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish, you got your", "yn.upper() == 'Y': print('OK, then let\\'s play!') break else: print('Oops! Try again.') players", "def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) ==", ". .') if wait.upper() == 'R': clear_screen() break def main(): global card_deck, card_map", "R to return to the game . . .') if wait.upper() == 'R':", "JFL Go Fish! 
This is a one-player game against the Computer.') players, dealer,", "to the game . . .') if wait.upper() == 'R': clear_screen() break def", "deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for card in range(10):", "player.check_wishes() highest_count = 0 most_cards = -1 while most_cards == -1: for denom,", "dealer = 0 else: dealer = 1 else: if winner == 0: dealer", "50: dealer = 0 else: dealer = 1 else: if winner == 0:", "== 0: current_player = 1 else: current_player = 0 return determine_winner(players, card_deck) def", "player.Wishes)\\ and (denom not in opp_sets) and (denom not in player.Sets): most_cards =", "pirple/python/final_project/main.py Final Project Create a Go Fish game \"\"\" from random import (shuffle,", "your wish.') sleep(3) return False def lay_set(self): new_set = -1 for card, suits", "in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom]", "self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ') for", "hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer == 0: print('{}", "= '--help' while player_name.upper() == '--HELP': player_name = input('Please enter your name (or", "len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) ==", "while(True): player_name = '--help' while player_name.upper() == '--HELP': player_name = input('Please enter your", "self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0]", "highest_count) and (denom not in player.Wishes)\\ and (denom not in opp_sets) and (denom", "== 0: return True else: return False def clear_screen(): # for windows if", "[draw_card[1]]}) if draw_card[0] == 
wish: print('Fish, fish, you got your wish!') return True", "players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish and not", "wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card)", "card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over = True", "for denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def cast(self, opp_hand,", "wish!') return True else: print('Booooo, you didn\\'t get your wish.') sleep(3) return False", "card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True", "if yn.upper() == 'Y': print('OK, then let\\'s play!') break else: print('Oops! Try again.')", "player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules():", "= 0 elif sets1 > sets0: print('{} wins, {} sets to {}'.format(players[1].Name, sets1,", "+ '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line)", "== '--HELP': player_name = input('Please enter your name (or --help for the rules):", "{} self.Sets = [] self.Wishes = [] def hand_counts(self): for denom in self.Hand.keys():", "!= -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True else: return False", "not game_over: game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True: wait", "clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n'", "'3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] #suits =", "play again (y/n)? 
') if yn.upper() != 'Y': play = False else: dealer,", "self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit, end=' ') print()", "a C to continue . . .') if wait.upper() == 'C': break if", "like to play again (y/n)? ') if yn.upper() != 'Y': play = False", "denom highest_count = count if most_cards == -1: player.Wishes = [] player.HandCounts =", "= 0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish = True", "in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer ==", "name, computer = False): self.Name = name self.Computer = computer self.new_hand() def new_hand(self):", "r <= 50: dealer = 0 else: dealer = 1 else: if winner", "opp_hand.pop(wish) if wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards})", "most_cards = denom highest_count = count if most_cards == -1: player.Wishes = []", "[]: game_over = True else: game_over = player.lay_set() if got_wish and not game_over", "import sleep from os import system, name class Player: def __init__(self, name, computer", "break def main(): global card_deck, card_map clear_screen() print('Welcome to JFL Go Fish! 
This", "= play_game(players, dealer, card_deck) yn = input('Would you like to play again (y/n)?", "if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen {} for your name.", "opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer:", "dealer = 0 return dealer def deal_hand(deck, players, new_game=True, winner=None): hands = []", "{} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0", "player.HandCounts.items(): if (count > highest_count) and (denom not in player.Wishes)\\ and (denom not", "== 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else:", "dealer, card_deck, card_map = start_new_game() play = True while play: winner = play_game(players,", "'\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card,", "got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1])", "game . . .') if wait.upper() == 'R': clear_screen() break def main(): global", "wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck ==", "'9', '10', 'J', 'Q', 'K'] #suits = [hearts, diamonds, spades, clubs] suits =", "['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']", "True else: game_over = player.lay_set() if got_wish and not game_over and not player.Computer:", "to continue . . 
.') if wait.upper() == 'C': break if current_player ==", "Final Project Create a Go Fish game \"\"\" from random import (shuffle, randint)", "wait.upper() == 'C': break if current_player == 0: current_player = 1 else: current_player", "value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards = -1 while", "= [] hands.append({}) hands.append({}) for card in range(10): for hand in range(2): top_card", "'s', end=' ') print() def cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys():", "'posix') else: _ = system('clear') def create_deck(): deck = [] map = {}", "draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0]", "else: dealer = 1 else: if winner == 0: dealer = 1 else:", "dealer, card_deck): game_over = False if dealer == 0: current_player = 1 else:", "'.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print()", "name == 'nt': _ = system('cls') # for mac and linux(here, os.name is", "deck = [] map = {} card_denominations = ['A', '2', '3', '4', '5',", "deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner): if new_game or winner", "for denom, count in player.HandCounts.items(): if (count > highest_count) and (denom not in", "Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return", "def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5:", "to return to the game . . 
.') if wait.upper() == 'R': clear_screen()", "if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish,", "card_denominations = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J',", "play_hand(players, current_player, card_deck, card_map): got_wish = True game_over = False card_map_keys = list(card_map.keys())", "= system('cls') # for mac and linux(here, os.name is 'posix') else: _ =", "# for windows if name == 'nt': _ = system('cls') # for mac", "return winner def play_game(players, dealer, card_deck): game_over = False if dealer == 0:", ".') if wait.upper() == 'C': break if current_player == 0: current_player = 1", "1 else: current_player = 0 while not game_over: game_over = play_hand(players, current_player, card_deck,", "if wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return", "play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True: wait = input('Enter a C", "print('Oops! 
Try again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer,", "break if current_player == 0: current_player = 1 else: current_player = 0 return", "value in your hand.') else: break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes()", "== []: game_over = True else: game_over = player.lay_set() if got_wish and not", "in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else:", "card_deck, card_map): got_wish = True game_over = False card_map_keys = list(card_map.keys()) card_map_values =", "fish, you got your wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for", "= denom highest_count = count if most_cards == -1: player.Wishes = [] player.HandCounts", "= create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer, card_deck,", "sets to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s a tie!') winner", "got_wish and not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else:", "self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name), end=' ') for", "if players[current_player].Computer: while True: wait = input('Enter a C to continue . .", "wish for a card value in your hand.') else: break return value_position def", "False): self.Name = name self.Computer = computer self.new_hand() def new_hand(self): self.Hand = {}", ". . 
.'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name", "= list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand opp_sets", "[] map = {} card_denominations = ['A', '2', '3', '4', '5', '6', '7',", "if current_player == 0: opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand =", "is the dealer. {} goes first. Dealing the hands . . .'.format(players[1].Name, players[0].Name))", "deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line =", "wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if", "game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True: wait = input('Enter", "'10', 'J', 'Q', 'K'] #suits = [hearts, diamonds, spades, clubs] suits = ['\\u2665',", "value_position not in player.Hand.keys(): print('You must wish for a card value in your", "== 'R': clear_screen() break def main(): global card_deck, card_map clear_screen() print('Welcome to JFL", "'.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s play!') break else: print('Oops! Try", "players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck, card_map def play_again(players, winner):", ". . .'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{} is the", "name (or --help for the rules): ') if player_name.upper() == '--HELP': print_rules() yn", "dealer, hands[1], hands[0] else: print('{} is the dealer. {} goes first. 
Dealing the", "return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left", "= 2 print('Good game!') return winner def play_game(players, dealer, card_deck): game_over = False", "False else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!') if __name__ == '__main__':", "print('{}\\'s hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand): for suit in self.Hand[denom]:", "else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom] + 's',", "winner=None): hands = [] hands.append({}) hands.append({}) for card in range(10): for hand in", "True while play: winner = play_game(players, dealer, card_deck) yn = input('Would you like", "= players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player =", "for a card value in your hand.') else: break return value_position def generate_wish(player,", "clear_screen(): # for windows if name == 'nt': _ = system('cls') # for", "print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets)", "generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck,", "self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish, you got your wish!') return", "choose_dealer(new_game, winner): if new_game or winner == 2: r = randint(1, 100) if", "count in player.HandCounts.items(): if (count > highest_count) and (denom not in player.Wishes)\\ and", "== '--HELP': print_rules() yn = input('You\\'ve chosen {} for your name. 
Is that", "hands[0], hands[1] def start_new_game(): while(True): player_name = '--help' while player_name.upper() == '--HELP': player_name", "players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map def display_current_status(players,", "winner def play_game(players, dealer, card_deck): game_over = False if dealer == 0: current_player", "print('You must wish for a card value in your hand.') else: break return", "wait.upper() == 'R': clear_screen() break def main(): global card_deck, card_map clear_screen() print('Welcome to", "highest_count = count if most_cards == -1: player.Wishes = [] player.HandCounts = {}", "dealer, card_deck) yn = input('Would you like to play again (y/n)? ') if", "if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ')", "def __init__(self, name, computer = False): self.Name = name self.Computer = computer self.new_hand()", "count if most_cards == -1: player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards)", "self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes)", "card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck,", "hands[1], hands[0] else: print('{} is the dealer. {} goes first. 
Dealing the hands", "else: break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards", "= rules.read() print(rules_text) while True: wait = input('Enter an R to return to", "the rules): ') if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen {}", "players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck()", "to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s a tie!') winner =", "print() if self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name),", "elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end='", "got_wish and not game_over and not player.Computer: player.print_hand() return game_over def request_wish(player, card_map,", "Go Fish! 
This is a one-player game against the Computer.') players, dealer, card_deck,", "len(self.Hand) == 0: return True else: return False def clear_screen(): # for windows", "print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end='", "print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom", "you got your wish!') return True else: print('Booooo, you didn\\'t get your wish.')", "player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a", "'nt': _ = system('cls') # for mac and linux(here, os.name is 'posix') else:", "determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 >", "len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1: print('{} wins, {} sets to", "0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish = True game_over", "a tie!') winner = 2 print('Good game!') return winner def play_game(players, dealer, card_deck):", "J, Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys():", ". 
.'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name =", "== -1: player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing", "self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s sets:", "suits in self.Hand.items(): if len(suits) == 4: new_set = card if new_set !=", "return game_over def request_wish(player, card_map, card_map_values): while True: wish = input('What is your", "suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit, end='", "True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players,", "(2-10, J, Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position not in", "current_player, card_deck, card_map) if players[current_player].Computer: while True: wait = input('Enter a C to", "self.Wishes = [] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self):", "[top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer == 0: print('{} is the dealer.", "== 0: print('{} is the dealer. {} goes first. Dealing the hands .", "(y/n)? '.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s play!') break else: print('Oops!", "== 0: opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets", "== 0: dealer = 1 else: dealer = 0 return dealer def deal_hand(deck,", "hands . . 
.'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{} is", "and not player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while True: wish", "[hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in", "return got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys():", "= player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over = True else: game_over", "range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer", "'8', '9', '10', 'J', 'Q', 'K'] #suits = [hearts, diamonds, spades, clubs] suits", "got your wish!') return True else: print('Booooo, you didn\\'t get your wish.') sleep(3)", "def play_game(players, dealer, card_deck): game_over = False if dealer == 0: current_player =", "print('{} is the dealer. {} goes first. Dealing the hands . . .'.format(players[0].Name,", "list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand opp_sets =", "in player.Sets): most_cards = denom highest_count = count if most_cards == -1: player.Wishes", "player.Hand.keys(): print('You must wish for a card value in your hand.') else: break", "system('cls') # for mac and linux(here, os.name is 'posix') else: _ = system('clear')", "game_over = True else: game_over = player.lay_set() if got_wish and not game_over and", "def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text) while", "in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit, end=' ')", "print_rules() yn = input('You\\'ve chosen {} for your name. 
Is that correct (y/n)?", "def main(): global card_deck, card_map clear_screen() print('Welcome to JFL Go Fish! This is", "A)') else: value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must wish", "first. Dealing the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1]", "print('Fish, fish, you got your wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys():", "[]: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else: print('{}\\'s", "return players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map =", "randint(1, 100) if r <= 50: dealer = 0 else: dealer = 1", "0 else: dealer = 1 else: if winner == 0: dealer = 1", "hands = [] hands.append({}) hands.append({}) for card in range(10): for hand in range(2):", "self.Computer = computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets", "if current_player == 0: current_player = 1 else: current_player = 0 return determine_winner(players,", "card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over =", "{}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish and not game_over: if player.Computer:", "an R to return to the game . . 
.') if wait.upper() ==", "time import sleep from os import system, name class Player: def __init__(self, name,", "--help for the rules): ') if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve", "a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules:", "') print() def cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish, fish,", "set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'):", "else: _ = system('clear') def create_deck(): deck = [] map = {} card_denominations", "'--help' while player_name.upper() == '--HELP': player_name = input('Please enter your name (or --help", "while most_cards == -1: for denom, count in player.HandCounts.items(): if (count > highest_count)", "not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish =", "self.Hand = {} self.HandCounts = {} self.Sets = [] self.Wishes = [] def", "def create_deck(): deck = [] map = {} card_denominations = ['A', '2', '3',", "True else: print('Booooo, you didn\\'t get your wish.') sleep(3) return False def lay_set(self):", "['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for suit in", "sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def cast(self, opp_hand, card_deck, wish): if", "== 2: r = randint(1, 100) if r <= 50: dealer = 0", "game \"\"\" from random import (shuffle, randint) from time import sleep from os", "cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish, fish, you got your", "print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish and not game_over:", "hand.') else: break return value_position def generate_wish(player, opp_sets, card_map): player.check_wishes() 
highest_count = 0", "lay_set(self): new_set = -1 for card, suits in self.Hand.items(): if len(suits) == 4:", "not in player.Wishes)\\ and (denom not in opp_sets) and (denom not in player.Sets):", "else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def fish(self,", "True: wait = input('Enter an R to return to the game . .", "= len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1: print('{} wins, {} sets", "card_deck == []: game_over = True else: game_over = player.lay_set() if got_wish and", "[] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards]))", "in suits: deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner): if new_game", "deck, map def choose_dealer(new_game, winner): if new_game or winner == 2: r =", "= input('Would you like to play again (y/n)? ') if yn.upper() != 'Y':", "2 print('Good game!') return winner def play_game(players, dealer, card_deck): game_over = False if", "return False def lay_set(self): new_set = -1 for card, suits in self.Hand.items(): if", "[] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes)", "fish, you got your wish!') return True else: print('Booooo, you didn\\'t get your", "players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name = '--help' while", "continue . . 
.') if wait.upper() == 'C': break if current_player == 0:", "(count > highest_count) and (denom not in player.Wishes)\\ and (denom not in opp_sets)", "and linux(here, os.name is 'posix') else: _ = system('clear') def create_deck(): deck =", "while not game_over: game_over = play_hand(players, current_player, card_deck, card_map) if players[current_player].Computer: while True:", "wish: print('Fish, fish, you got your wish!') return True else: print('Booooo, you didn\\'t", "input('Please enter your name (or --help for the rules): ') if player_name.upper() ==", "if card_deck == []: game_over = True else: game_over = player.lay_set() if got_wish", "players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand()", "input('Enter an R to return to the game . . .') if wait.upper()", "def start_new_game(): while(True): player_name = '--help' while player_name.upper() == '--HELP': player_name = input('Please", "= 0 return dealer def deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({})", "[] hands.append({}) hands.append({}) for card in range(10): for hand in range(2): top_card =", "main(): global card_deck, card_map clear_screen() print('Welcome to JFL Go Fish! 
This is a", "if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if", "'6', '7', '8', '9', '10', 'J', 'Q', 'K'] #suits = [hearts, diamonds, spades,", "players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if", "sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish): draw_card =", "else: wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck", "print('You must wish for a valid card value (2-10, J, Q, K, A)')", "def lay_set(self): new_set = -1 for card, suits in self.Hand.items(): if len(suits) ==", "play = True while play: winner = play_game(players, dealer, card_deck) yn = input('Would", "draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish,", "card_deck): break_line = '\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n')", "def generate_wish(player, opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards = -1 while most_cards", "player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen()", "card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return", "= [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand:", "create_deck(): deck = [] map = {} card_denominations = ['A', '2', '3', '4',", "winner = play_game(players, dealer, card_deck) yn = input('Would you like to play again", "hand: '.format(self.Name), end=' 
') else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom in", "sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] +", "fish.') sleep(2) got_wish = self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish): draw_card", "self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True else: return False def clear_screen():", "> sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0", "print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text) while True:", "'.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand):", "sets0 > sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner =", "self.Sets = [] self.Wishes = [] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom:", "pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck)", "mac and linux(here, os.name is 'posix') else: _ = system('clear') def create_deck(): deck", "if value_position not in player.Hand.keys(): print('You must wish for a card value in", "card_deck): game_over = False if dealer == 0: current_player = 1 else: current_player", "card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1:", "hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()):", "players[current_player].Computer: while True: wait = input('Enter a C to continue . . 
.')", "self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\'", "to play again (y/n)? ') if yn.upper() != 'Y': play = False else:", "return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text = rules.read()", "request_wish(player, card_map, card_map_values): while True: wish = input('What is your wish? ') if", "not in card_map.values(): print('You must wish for a valid card value (2-10, J,", "'\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit])", "return True else: return False def clear_screen(): # for windows if name ==", "again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand", "hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand): for suit in self.Hand[denom]: if", "sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif", "if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map,", "winner = 2 print('Good game!') return winner def play_game(players, dealer, card_deck): game_over =", "= len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print()", "= -1 while most_cards == -1: for denom, count in player.HandCounts.items(): if (count", "elif sets1 > sets0: print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0)) winner", "sleep(3) return False def lay_set(self): new_set = -1 for card, suits in self.Hand.items():", "\"\"\" from random import (shuffle, randint) from time import sleep from os import", "= {} card_denominations = ['A', '2', '3', '4', '5', '6', '7', '8', '9',", "if player.Computer: player.HandCounts = {} while got_wish and not game_over: if 
player.Computer: player.hand_counts()", "(denom not in opp_sets) and (denom not in player.Sets): most_cards = denom highest_count", "current_player = 1 else: current_player = 0 while not game_over: game_over = play_hand(players,", "opp_sets, card_map): player.check_wishes() highest_count = 0 most_cards = -1 while most_cards == -1:", "opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish, fish, you got your wish!')", "'\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw", "[] self.Wishes = [] def hand_counts(self): for denom in self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def", "map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck) return deck, map def", "print('It\\'s a tie!') winner = 2 print('Good game!') return winner def play_game(players, dealer,", "sets0, sets1)) winner = 0 elif sets1 > sets0: print('{} wins, {} sets", "to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1 > sets0: print('{} wins,", "= {} self.HandCounts = {} self.Sets = [] self.Wishes = [] def hand_counts(self):", "return to the game . . .') if wait.upper() == 'R': clear_screen() break", "spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card:", "wait = input('Enter a C to continue . . 
.') if wait.upper() ==", "if yn.upper() != 'Y': play = False else: dealer, card_deck, card_map = play_again(players,", "game against the Computer.') players, dealer, card_deck, card_map = start_new_game() play = True", "players, dealer, card_deck, card_map = start_new_game() play = True while play: winner =", "end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom]", "import (shuffle, randint) from time import sleep from os import system, name class", "denom in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ') else:", "display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if sets0 > sets1: print('{}", "enter your name (or --help for the rules): ') if player_name.upper() == '--HELP':", "__init__(self, name, computer = False): self.Name = name self.Computer = computer self.new_hand() def", "= system('clear') def create_deck(): deck = [] map = {} card_denominations = ['A',", "current_player = 0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map): got_wish =", "') else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand): for suit", "card_deck) yn = input('Would you like to play again (y/n)? ') if yn.upper()", "in opp_sets) and (denom not in player.Sets): most_cards = denom highest_count = count", ". .') if wait.upper() == 'C': break if current_player == 0: current_player =", "{} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def", "end=' ') else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom in sorted(self.Hand): for", "print() def cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish, fish, you", "break else: print('Oops! 
Try again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map =", "cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 =", "r = randint(1, 100) if r <= 50: dealer = 0 else: dealer", "correct (y/n)? '.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s play!') break else:", "sets: '.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom in", "not in opp_sets) and (denom not in player.Sets): most_cards = denom highest_count =", "winner): if new_game or winner == 2: r = randint(1, 100) if r", "for card in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go", "name class Player: def __init__(self, name, computer = False): self.Name = name self.Computer", "is your wish? ') if wish.upper() not in card_map.values(): print('You must wish for", "play!') break else: print('Oops! Try again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map", "that correct (y/n)? 
'.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s play!') break", "players) return players, dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map", "sets1 > sets0: print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0)) winner =", "= {} while got_wish and not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player,", "= True while play: winner = play_game(players, dealer, card_deck) yn = input('Would you", "play: winner = play_game(players, dealer, card_deck) yn = input('Would you like to play", "dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck, card_map def play_again(players,", "-1: for denom, count in player.HandCounts.items(): if (count > highest_count) and (denom not", "def choose_dealer(new_game, winner): if new_game or winner == 2: r = randint(1, 100)", "sets1, sets0)) winner = 1 else: print('It\\'s a tie!') winner = 2 print('Good", "if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name), end=' ')", "'R': clear_screen() break def main(): global card_deck, card_map clear_screen() print('Welcome to JFL Go", "from os import system, name class Player: def __init__(self, name, computer = False):", "card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game,", "# for mac and linux(here, os.name is 'posix') else: _ = system('clear') def", "winner == 2: r = randint(1, 100) if r <= 50: dealer =", "card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]})", "dealer = 1 else: dealer = 0 return dealer def deal_hand(deck, players, new_game=True,", "= 0 most_cards = -1 while most_cards == -1: for denom, count in", "') for 
denom in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end='", "= deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game,", "sets0: print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else:", "while True: wish = input('What is your wish? ') if wish.upper() not in", "get your wish.') sleep(3) return False def lay_set(self): new_set = -1 for card,", "self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish, you got", "sets1)) winner = 0 elif sets1 > sets0: print('{} wins, {} sets to", "for hand in range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else:", "* 25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand() print() print('Draw pile:", "card_map_values): while True: wish = input('What is your wish? ') if wish.upper() not", "fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]:", "and not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish", "wish for a valid card value (2-10, J, Q, K, A)') else: value_position", "') if wish.upper() not in card_map.values(): print('You must wish for a valid card", "player.Computer: player.HandCounts = {} while got_wish and not game_over: if player.Computer: player.hand_counts() wish", "is the dealer. {} goes first. Dealing the hands . . 
.'.format(players[0].Name, players[1].Name))", "in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else: self.Hand.update({draw_card[0]: [draw_card[1]]}) if draw_card[0] == wish: print('Fish, fish, you", "suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]}) for", "if self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit, end=' ') print() if", "hands.append({}) for card in range(10): for hand in range(2): top_card = deck.pop() if", "== 0: current_player = 1 else: current_player = 0 while not game_over: game_over", "input('Enter a C to continue . . .') if wait.upper() == 'C': break", "card_map = start_new_game() play = True while play: winner = play_game(players, dealer, card_deck)", "again (y/n)? ') if yn.upper() != 'Y': play = False else: dealer, card_deck,", "== 'nt': _ = system('cls') # for mac and linux(here, os.name is 'posix')", "= start_new_game() play = True while play: winner = play_game(players, dealer, card_deck) yn", "winner) if dealer == 0: print('{} is the dealer. {} goes first. Dealing", "value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must wish for a", "!= 'Y': play = False else: dealer, card_deck, card_map = play_again(players, winner) print('Goodbye!')", "if wish in opp_hand.keys(): print('Fish, fish, you got your wish!') opp_cards = opp_hand.pop(wish)", "with open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text) while True: wait =", "opp_cards}) return True else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck, wish) return", "Try again.') players = [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand,", "K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must", "{} goes first. Dealing the hands . . 
.'.format(players[1].Name, players[0].Name)) sleep(2) return dealer,", "rules): ') if player_name.upper() == '--HELP': print_rules() yn = input('You\\'ve chosen {} for", "'\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def", "card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand", "input('What is your wish? ') if wish.upper() not in card_map.values(): print('You must wish", "player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while True: wish = input('What", "class Player: def __init__(self, name, computer = False): self.Name = name self.Computer =", "your wish!') return True else: print('Booooo, you didn\\'t get your wish.') sleep(3) return", "card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players, False, winner) return dealer,", "+ 's', end=' ') print() def cast(self, opp_hand, card_deck, wish): if wish in", "{}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text", "= [Player(player_name), Player('Computer', True)] card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck,", "') print() if self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets:", "denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def cast(self, opp_hand, card_deck,", "player_name.upper() == '--HELP': player_name = input('Please enter your name (or --help for the", "+ suit, end=' ') print() if self.Sets != []: self.print_sets() def print_sets(self): if", "card_map_keys[wish]) if card_deck == []: game_over = True else: game_over = player.lay_set() if", "> sets0: print('{} wins, {} sets to {}'.format(players[1].Name, 
sets1, sets0)) winner = 1", "while True: wait = input('Enter a C to continue . . .') if", "print('{} wins, {} sets to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s", "(or --help for the rules): ') if player_name.upper() == '--HELP': print_rules() yn =", "the Computer.') players, dealer, card_deck, card_map = start_new_game() play = True while play:", "not in player.Hand.keys(): print('You must wish for a card value in your hand.')", "not in player.Sets): most_cards = denom highest_count = count if most_cards == -1:", "player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values)", "Fish game \"\"\" from random import (shuffle, randint) from time import sleep from", "def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name),", "in range(10): for hand in range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys():", "if name == 'nt': _ = system('cls') # for mac and linux(here, os.name", "let\\'s play!') break else: print('Oops! 
Try again.') players = [Player(player_name), Player('Computer', True)] card_deck,", "return True else: print('Booooo, you didn\\'t get your wish.') sleep(3) return False def", "if dealer == 0: current_player = 1 else: current_player = 0 while not", "or winner == 2: r = randint(1, 100) if r <= 50: dealer", "break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1 = len(players[1].Sets) if", "tie!') winner = 2 print('Good game!') return winner def play_game(players, dealer, card_deck): game_over", "{} self.HandCounts = {} self.Sets = [] self.Wishes = [] def hand_counts(self): for", "card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if", "in range(2): top_card = deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]})", "dealer == 0: print('{} is the dealer. {} goes first. Dealing the hands", "in player.Hand.keys(): print('You must wish for a card value in your hand.') else:", "randint) from time import sleep from os import system, name class Player: def", "Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You", ".'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0], hands[1] def start_new_game(): while(True): player_name = '--help'", "\"\"\" pirple/python/final_project/main.py Final Project Create a Go Fish game \"\"\" from random import", "self.Name = name self.Computer = computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts", "else: print('{} is the dealer. {} goes first. Dealing the hands . 
.", "top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer", "start_new_game() play = True while play: winner = play_game(players, dealer, card_deck) yn =", "== wish: print('Fish, fish, you got your wish!') return True else: print('Booooo, you", "1 else: if winner == 0: dealer = 1 else: dealer = 0", "card_deck, card_map) if players[current_player].Computer: while True: wait = input('Enter a C to continue", "the hands . . .'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{}", "print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck):", "opp_hand = players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets", "if len(suits) == 4: new_set = card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set)", "self.Sets.append(new_set) if len(self.Hand) == 0: return True else: return False def clear_screen(): #", "else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer == 0: print('{} is", "current_player = 0 while not game_over: game_over = play_hand(players, current_player, card_deck, card_map) if", "goes first. Dealing the hands . . 
.'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1],", "opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for card in opp_cards: self.Hand[wish].append(card) else:", "False if dealer == 0: current_player = 1 else: current_player = 0 while", "def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand =", "print(card_map[denom] + 's', end=' ') print() def cast(self, opp_hand, card_deck, wish): if wish", "wish): if wish in opp_hand.keys(): print('Fish, fish, you got your wish!') opp_cards =", "_ = system('clear') def create_deck(): deck = [] map = {} card_denominations =", "your name (or --help for the rules): ') if player_name.upper() == '--HELP': print_rules()", "card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left = len(card_deck)", "print(break_line + '\\n') players[1].print_hand() print() print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' +", "else: print('It\\'s a tie!') winner = 2 print('Good game!') return winner def play_game(players,", "end=' ') else: print(card_map[denom] + suit, end=' ') print() if self.Sets != []:", "your name. Is that correct (y/n)? '.format(player_name)) if yn.upper() == 'Y': print('OK, then", "hands[0] else: print('{} is the dealer. {} goes first. 
Dealing the hands .", "= deal_hand(card_deck, players, False, winner) return dealer, card_deck, card_map def display_current_status(players, card_deck): break_line", "card_map.values(): print('You must wish for a valid card value (2-10, J, Q, K,", "'2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K'] #suits", "display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line +", "card_map_values.index(wish.upper()) if value_position not in player.Hand.keys(): print('You must wish for a card value", "the game . . .') if wait.upper() == 'R': clear_screen() break def main():", "This is a one-player game against the Computer.') players, dealer, card_deck, card_map =", "'C': break if current_player == 0: current_player = 1 else: current_player = 0", "got your wish!') opp_cards = opp_hand.pop(wish) if wish in self.Hand.keys(): for card in", "dealer, hands[0], hands[1] def start_new_game(): while(True): player_name = '--help' while player_name.upper() == '--HELP':", "computer self.new_hand() def new_hand(self): self.Hand = {} self.HandCounts = {} self.Sets = []", "= request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []:", "players[1].Hand opp_sets = players[1].Sets else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck)", "card_map) if players[current_player].Computer: while True: wait = input('Enter a C to continue .", "if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif len(self.Wishes) == 5: self.Wishes.pop(0) def", "'.format(self.Name), end=' ') else: print('{}\\'s sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets):", "and (denom not in player.Sets): most_cards = denom highest_count = count if most_cards", "suit, end=' ') print() if self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'):", "card value 
(2-10, J, Q, K, A)') else: value_position = card_map_values.index(wish.upper()) if value_position", "while got_wish and not game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map)", "game_over: if player.Computer: player.hand_counts() wish = generate_wish(player, opp_sets, card_map) else: wish = request_wish(player,", "card in range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck) return", "(denom not in player.Sets): most_cards = denom highest_count = count if most_cards ==", "print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1", "print('Draw pile: {} cards'.format(cards_left)) print() players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players,", "= create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck, card_map", "is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt',", "else: opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your", "suits: deck.append([card, suit]) shuffle(deck) return deck, map def choose_dealer(new_game, winner): if new_game or", "new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True else: return", "for windows if name == 'nt': _ = system('cls') # for mac and", "deck.pop() if top_card[0] in hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner)", "self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ')", "card_deck, wish): if wish in opp_hand.keys(): print('Fish, 
fish, you got your wish!') opp_cards", "got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish]) if card_deck == []: game_over = True else:", "else: game_over = player.lay_set() if got_wish and not game_over and not player.Computer: player.print_hand()", "card_map[most_cards])) return most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text =", "return dealer def deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for", "system, name class Player: def __init__(self, name, computer = False): self.Name = name", "game!') return winner def play_game(players, dealer, card_deck): game_over = False if dealer ==", "self.Computer: print('\\u2733', end=' ') else: print(card_map[denom] + suit, end=' ') print() if self.Sets", "hands[hand].keys(): hands[hand][top_card[0]].append(top_card[1]) else: hands[hand].update({top_card[0]: [top_card[1]]}) dealer = choose_dealer(new_game, winner) if dealer == 0:", "= 1 else: current_player = 0 while not game_over: game_over = play_hand(players, current_player,", "-1 while most_cards == -1: for denom, count in player.HandCounts.items(): if (count >", "game_over = False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0:", "> highest_count) and (denom not in player.Wishes)\\ and (denom not in opp_sets) and", "-1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True else: return False def", "= input('You\\'ve chosen {} for your name. Is that correct (y/n)? 
'.format(player_name)) if", "True else: return False def clear_screen(): # for windows if name == 'nt':", "card_deck, card_map = create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer,", "else: print(card_map[denom] + suit, end=' ') print() if self.Sets != []: self.print_sets() def", "-1: player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for", "for card in range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck)", "{} goes first. Dealing the hands . . .'.format(players[0].Name, players[1].Name)) sleep(2) return dealer,", "dealer. {} goes first. Dealing the hands . . .'.format(players[0].Name, players[1].Name)) sleep(2) return", "most_cards def print_rules(): clear_screen() with open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text)", "goes first. Dealing the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2) return dealer, hands[0],", "rules: rules_text = rules.read() print(rules_text) while True: wait = input('Enter an R to", "play_game(players, dealer, card_deck): game_over = False if dealer == 0: current_player = 1", "got_wish = True game_over = False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if", "dealer, card_deck, card_map def display_current_status(players, card_deck): break_line = '\\u274C' * 25 cards_left =", "(y/n)? 
') if yn.upper() != 'Y': play = False else: dealer, card_deck, card_map", "players[0].print_hand() print('\\n' + break_line) def determine_winner(players, card_deck): display_current_status(players, card_deck) sets0 = len(players[0].Sets) sets1", "card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while", "end=' ') print() def cast(self, opp_hand, card_deck, wish): if wish in opp_hand.keys(): print('Fish,", "dealer def deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for card", "game_over and not player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while True:", "= generate_wish(player, opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand,", "if r <= 50: dealer = 0 else: dealer = 1 else: if", "'--HELP': player_name = input('Please enter your name (or --help for the rules): ')", "if most_cards == -1: player.Wishes = [] player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{}", "sets0)) winner = 1 else: print('It\\'s a tie!') winner = 2 print('Good game!')", "sets: '.format(self.Name), end=' ') for denom in sorted(self.Sets): print(card_map[denom] + 's', end=' ')", "= choose_dealer(new_game, winner) if dealer == 0: print('{} is the dealer. {} goes", "input('Would you like to play again (y/n)? ') if yn.upper() != 'Y': play", "map def choose_dealer(new_game, winner): if new_game or winner == 2: r = randint(1,", "card in range(10): for hand in range(2): top_card = deck.pop() if top_card[0] in", "clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13): map.update({card: card_denominations[card]})", "opp_sets, card_map) else: wish = request_wish(player, card_map, card_map_values) got_wish = player.cast(opp_hand, card_deck, card_map_keys[wish])", "the dealer. {} goes first. 
Dealing the hands . . .'.format(players[1].Name, players[0].Name)) sleep(2)", "return False def clear_screen(): # for windows if name == 'nt': _ =", "then let\\'s play!') break else: print('Oops! Try again.') players = [Player(player_name), Player('Computer', True)]", "player.HandCounts = {} while got_wish and not game_over: if player.Computer: player.hand_counts() wish =", "player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while True: wish = input('What is", "winner = 0 elif sets1 > sets0: print('{} wins, {} sets to {}'.format(players[1].Name,", "range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck) return deck, map", ". . .') if wait.upper() == 'R': clear_screen() break def main(): global card_deck,", "in player.HandCounts.items(): if (count > highest_count) and (denom not in player.Wishes)\\ and (denom", "if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0: return True else:", "in opp_cards: self.Hand[wish].append(card) else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go fish.') sleep(2)", "not player.Computer: player.print_hand() return game_over def request_wish(player, card_map, card_map_values): while True: wish =", "open('data/rules.txt', 'r') as rules: rules_text = rules.read() print(rules_text) while True: wait = input('Enter", "2: r = randint(1, 100) if r <= 50: dealer = 0 else:", "new_set = -1 for card, suits in self.Hand.items(): if len(suits) == 4: new_set", "new_set = card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) == 0:", "for card, suits in self.Hand.items(): if len(suits) == 4: new_set = card if", "Fish! 
This is a one-player game against the Computer.') players, dealer, card_deck, card_map", "print('Good game!') return winner def play_game(players, dealer, card_deck): game_over = False if dealer", "4: new_set = card if new_set != -1: self.Hand.pop(new_set) self.Sets.append(new_set) if len(self.Hand) ==", "= 1 else: if winner == 0: dealer = 1 else: dealer =", "card_map, card_map_values): while True: wish = input('What is your wish? ') if wish.upper()", "if len(self.Hand) == 0: return True else: return False def clear_screen(): # for", "os import system, name class Player: def __init__(self, name, computer = False): self.Name", "'7', '8', '9', '10', 'J', 'Q', 'K'] #suits = [hearts, diamonds, spades, clubs]", "wish.upper() not in card_map.values(): print('You must wish for a valid card value (2-10,", "clear_screen() break def main(): global card_deck, card_map clear_screen() print('Welcome to JFL Go Fish!", "name. Is that correct (y/n)? '.format(player_name)) if yn.upper() == 'Y': print('OK, then let\\'s", "shuffle(deck) return deck, map def choose_dealer(new_game, winner): if new_game or winner == 2:", "player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts = {} while got_wish", "0 return dealer def deal_hand(deck, players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({})", "if wait.upper() == 'C': break if current_player == 0: current_player = 1 else:", "') else: print(card_map[denom] + suit, end=' ') print() if self.Sets != []: self.print_sets()", "for denom in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer: print('\\u2733', end=' ')", "0: current_player = 1 else: current_player = 0 while not game_over: game_over =", "print('OK, then let\\'s play!') break else: print('Oops! 
Try again.') players = [Player(player_name), Player('Computer',", "winner = 1 else: print('It\\'s a tie!') winner = 2 print('Good game!') return", "(denom not in player.Wishes)\\ and (denom not in opp_sets) and (denom not in", "players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else: print('{} is the dealer. {} goes", "if draw_card[0] == wish: print('Fish, fish, you got your wish!') return True else:", "else: self.Hand.update({wish: opp_cards}) return True else: print('Nope! Go fish.') sleep(2) got_wish = self.fish(card_deck,", "def request_wish(player, card_map, card_map_values): while True: wish = input('What is your wish? ')", "sleep(2) return dealer, hands[1], hands[0] else: print('{} is the dealer. {} goes first.", "= self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop() if", "print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return most_cards def print_rules(): clear_screen() with", "print('Welcome to JFL Go Fish! This is a one-player game against the Computer.')", "chosen {} for your name. Is that correct (y/n)? '.format(player_name)) if yn.upper() ==", "got_wish = self.fish(card_deck, wish) return got_wish def fish(self, card_deck, wish): draw_card = card_deck.pop()", "1 else: print('It\\'s a tie!') winner = 2 print('Good game!') return winner def", "0: print('{} is the dealer. {} goes first. Dealing the hands . .", "create_deck() dealer, players[0].Hand, players[1].Hand = deal_hand(card_deck, players) return players, dealer, card_deck, card_map def", "must wish for a card value in your hand.') else: break return value_position", "dealer == 0: current_player = 1 else: current_player = 0 while not game_over:", "the dealer. {} goes first. Dealing the hands . . 
.'.format(players[0].Name, players[1].Name)) sleep(2)", "= 1 else: dealer = 0 return dealer def deal_hand(deck, players, new_game=True, winner=None):", "player.HandCounts = {} player.hand_counts() player.Wishes.append(most_cards) print('{} is wishing for a {}.'.format(player.Name, card_map[most_cards])) return", "5: self.Wishes.pop(0) def print_hand(self): if self.Name.upper().endswith('S'): print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s", "{} sets to {}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s a tie!')", "from random import (shuffle, randint) from time import sleep from os import system,", "print('Fish, fish, you got your wish!') return True else: print('Booooo, you didn\\'t get", "current_player = 1 else: current_player = 0 return determine_winner(players, card_deck) def play_hand(players, current_player,", "_ = system('cls') # for mac and linux(here, os.name is 'posix') else: _", "0: dealer = 1 else: dealer = 0 return dealer def deal_hand(deck, players,", "your wish? ') if wish.upper() not in card_map.values(): print('You must wish for a", "if sets0 > sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner", "if dealer == 0: print('{} is the dealer. {} goes first. 
Dealing the", "dealer, card_deck, card_map def play_again(players, winner): players[0].new_hand() players[1].new_hand() card_deck, card_map = create_deck() dealer,", "{}'.format(players[1].Name, sets1, sets0)) winner = 1 else: print('It\\'s a tie!') winner = 2", "highest_count = 0 most_cards = -1 while most_cards == -1: for denom, count", "new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for card in range(10): for hand", "break_line = '\\u274C' * 25 cards_left = len(card_deck) clear_screen() print(break_line + '\\n') players[1].print_hand()", "'K'] #suits = [hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663']", "diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card in range(13):", "system('clear') def create_deck(): deck = [] map = {} card_denominations = ['A', '2',", "hands[1] def start_new_game(): while(True): player_name = '--help' while player_name.upper() == '--HELP': player_name =", "!= []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\' sets: '.format(self.Name), end=' ') else:", "= list(card_map.values()) if current_player == 0: opp_hand = players[1].Hand opp_sets = players[1].Sets else:", "= count if most_cards == -1: player.Wishes = [] player.HandCounts = {} player.hand_counts()", "1 else: current_player = 0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck, card_map):", "card_map clear_screen() print('Welcome to JFL Go Fish! This is a one-player game against", "game_over def request_wish(player, card_map, card_map_values): while True: wish = input('What is your wish?", "Dealing the hands . . 
.'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0] else:", "player.Sets): most_cards = denom highest_count = count if most_cards == -1: player.Wishes =", "a one-player game against the Computer.') players, dealer, card_deck, card_map = start_new_game() play", "self.Hand.keys(): self.HandCounts.update({denom: len(self.Hand[denom])}) def check_wishes(self): if set(self.Wishes) == set(self.Hand.keys()): self.Wishes = [] elif", "a valid card value (2-10, J, Q, K, A)') else: value_position = card_map_values.index(wish.upper())", "dealer = 1 else: if winner == 0: dealer = 1 else: dealer", "for mac and linux(here, os.name is 'posix') else: _ = system('clear') def create_deck():", "False def lay_set(self): new_set = -1 for card, suits in self.Hand.items(): if len(suits)", "'J', 'Q', 'K'] #suits = [hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666',", "dealer = choose_dealer(new_game, winner) if dealer == 0: print('{} is the dealer. {}", "def fish(self, card_deck, wish): draw_card = card_deck.pop() if draw_card[0] in self.Hand.keys(): self.Hand[draw_card[0]].append(draw_card[1]) else:", "import system, name class Player: def __init__(self, name, computer = False): self.Name =", "players, new_game=True, winner=None): hands = [] hands.append({}) hands.append({}) for card in range(10): for", "== -1: for denom, count in player.HandCounts.items(): if (count > highest_count) and (denom", "= [hearts, diamonds, spades, clubs] suits = ['\\u2665', '\\u2666', '\\u2660', '\\u2663'] for card", "self.Hand.items(): if len(suits) == 4: new_set = card if new_set != -1: self.Hand.pop(new_set)", "'Y': print('OK, then let\\'s play!') break else: print('Oops! 
Try again.') players = [Player(player_name),", "= 1 else: current_player = 0 return determine_winner(players, card_deck) def play_hand(players, current_player, card_deck,", "opp_hand = players[0].Hand opp_sets = players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn,", "for a valid card value (2-10, J, Q, K, A)') else: value_position =", "card_deck, card_map_keys[wish]) if card_deck == []: game_over = True else: game_over = player.lay_set()", "for your name. Is that correct (y/n)? '.format(player_name)) if yn.upper() == 'Y': print('OK,", "'.format(self.Name), end=' ') for denom in sorted(self.Hand): for suit in self.Hand[denom]: if self.Computer:", "wins, {} sets to {}'.format(players[0].Name, sets0, sets1)) winner = 0 elif sets1 >", "yn.upper() != 'Y': play = False else: dealer, card_deck, card_map = play_again(players, winner)", "in range(13): map.update({card: card_denominations[card]}) for suit in suits: deck.append([card, suit]) shuffle(deck) return deck,", "Computer.') players, dealer, card_deck, card_map = start_new_game() play = True while play: winner", "input('You\\'ve chosen {} for your name. Is that correct (y/n)? '.format(player_name)) if yn.upper()", "wish in opp_hand.keys(): print('Fish, fish, you got your wish!') opp_cards = opp_hand.pop(wish) if", "os.name is 'posix') else: _ = system('clear') def create_deck(): deck = [] map", "len(players[1].Sets) if sets0 > sets1: print('{} wins, {} sets to {}'.format(players[0].Name, sets0, sets1))", "end=' ') print() if self.Sets != []: self.print_sets() def print_sets(self): if self.Name.upper().endswith('S'): print('{}\\'", "while player_name.upper() == '--HELP': player_name = input('Please enter your name (or --help for", "in sorted(self.Sets): print(card_map[denom] + 's', end=' ') print() def cast(self, opp_hand, card_deck, wish):", "first. Dealing the hands . . 
.'.format(players[0].Name, players[1].Name)) sleep(2) return dealer, hands[1], hands[0]", "0: return True else: return False def clear_screen(): # for windows if name", "computer = False): self.Name = name self.Computer = computer self.new_hand() def new_hand(self): self.Hand", "= 1 else: print('It\\'s a tie!') winner = 2 print('Good game!') return winner", "sleep from os import system, name class Player: def __init__(self, name, computer =", "against the Computer.') players, dealer, card_deck, card_map = start_new_game() play = True while", "= False card_map_keys = list(card_map.keys()) card_map_values = list(card_map.values()) if current_player == 0: opp_hand", "print('{}\\' hand: '.format(self.Name), end=' ') else: print('{}\\'s hand: '.format(self.Name), end=' ') for denom", "player.lay_set() if got_wish and not game_over and not player.Computer: player.print_hand() return game_over def", "= players[0].Sets display_current_status(players, card_deck) player = players[current_player] print('Your turn, {}.'.format(player.Name)) if player.Computer: player.HandCounts" ]
[ "template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove", "param.default for param in template.inputs if param.name == key ][0] except IndexError: logger.error(f\"Input", "| parsed_params: dictionary containing the input to change | path: local or remote", "raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser could", "lambda x: x is not None, _get_input_value, template, ) # Update nodetemplate properties", "TypeError): logger.debug(f\"Input '{key}' not given, using default\") try: return [ param.default for param", "template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string): path to", "handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate", "node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try: return template.parsed_params[key] except", "just looks at entity_tpl, so update that # references to renamed nodes exist", "https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser could not parse the ADT...\")", "= list(requirement)[0] # for shorthand requirement notation, just replace the string try: requirement[key]", "the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\" error =", "get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string): path to the saved", "as e: error = f\"YAML Error\\n {e}\" except Exception as e: error =", "path to the file to parse \"\"\" errors = None if path.endswith(\".csar\"): template", "in template.inputs if param.name == key ][0] except IndexError: 
logger.error(f\"Input '{key}' has no", "names and refs\"\"\" # tpl and entity_tpl are not ever (I think) used", "_normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\" for requirement in requirements: key", "the key and update 'node' in the inner dictionary except AttributeError: requirement[key][\"node\"] =", "and update 'node' in the inner dictionary except AttributeError: requirement[key][\"node\"] = ( requirement[key][\"node\"].replace(\"_\",", ") if error: raise ValidationError(error, \"TOSCA Parser could not parse the ADT...\") return", "so update that # references to renamed nodes exist in targets_list, so use", "in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\"", "node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks at entity_tpl, so", "in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just", "are not ever (I think) used to pull node names # so update", "the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then", "template object \"\"\" error = \"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True", "HINT: This might be due to a wrong type - check your imports.\"", "= f\"{e}\\n HINT: This might be due to a wrong type - check", "tpl and entity_tpl are not ever (I think) used to pull node names", "then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x is not None,", "to renamed nodes exist in targets_list, so use that for policy in template.policies:", "= node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using 
parsed_params\"\"\" try: return template.parsed_params[key]", "no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from node names and refs\"\"\"", "template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return", "- check your imports.\" except YAMLError as e: error = f\"YAML Error\\n {e}\"", "] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\" for requirement in", "errors = None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template = handle_yaml(path,", "using default\") try: return [ param.default for param in template.inputs if param.name ==", "in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\" for", "as e: error = f\"{e}\\n HINT: This might be due to a wrong", "tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\" error = \"\"", "in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements):", "param in template.inputs if param.name == key ][0] except IndexError: logger.error(f\"Input '{key}' has", "# Update nodetemplate properties for node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key,", "`get_input` tags in the template, then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda", "+ __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate :params:", "list(requirement)[0] # for shorthand requirement notation, just replace the string try: requirement[key] =", "saved ADT parsed_params (dict): tosca inputs Raises: ValueError: If the tosca-parser has 
trouble", "not parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the", "from node names and refs\"\"\" # tpl and entity_tpl are not ever (I", "property just looks at entity_tpl, so update that # references to renamed nodes", "any ADT and returns a ToscaTemplate :params: path, parsed_params :type: string, dictionary :return:", "import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\"", "replace the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise", "template, ) # Update nodetemplate properties for node in template.nodetemplates: node._properties = node._create_properties()", "default\") try: return [ param.default for param in template.inputs if param.name == key", "Returns: ToscaTemplate: Parsed template object \"\"\" error = \"\" try: template = ToscaTemplate(", "except YAMLError as e: error = f\"YAML Error\\n {e}\" except Exception as e:", "YAMLError from micadoparser import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar", "update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x is not None, _get_input_value, template, )", "import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT", "toscaparser.tosca_template import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import YAMLError", "e: error = f\"{e}\\n HINT: This might be due to a wrong type", "a_file=True ) except TOSCAParserError as e: error = [ line for line in", "template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then resolve and update\"\"\"", "'{key}' not given, using default\") try: return [ param.default for param 
in template.inputs", "the name property of the nodetemplate object for node in template.nodetemplates: node.name =", "template.inputs if param.name == key ][0] except IndexError: logger.error(f\"Input '{key}' has no default\")", "in targets_list, so use that for policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name", "<filename>micadoparser/parser.py import logging from toscaparser.tosca_template import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError", "update the name property of the nodetemplate object for node in template.nodetemplates: node.name", "as e: error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at", "[ param.default for param in template.inputs if param.name == key ][0] except IndexError:", "template = handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return", "\"-\") ) # otherwise get the key and update 'node' in the inner", "entity_tpl, so update that # references to renamed nodes exist in targets_list, so", "shorthand requirement notation, just replace the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\",", "TOSCAParserError as e: error = [ line for line in e.message.splitlines() if all([line,", "# for shorthand requirement notation, just replace the string try: requirement[key] = (", "containing the input to change | path: local or remote path to the", "default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from node names and refs\"\"\" #", "except IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods", "def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string): path to the", "\"get_input\", lambda x: x is not None, _get_input_value, template, ) # Update 
nodetemplate", "notation, just replace the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") )", "_normalise_requirement_node_refs(node._requirements) # the targets property just looks at entity_tpl, so update that #", "from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from", ":return: template | parsed_params: dictionary containing the input to change | path: local", "path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template)", "path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error = [ line for", "in the inner dictionary except AttributeError: requirement[key][\"node\"] = ( requirement[key][\"node\"].replace(\"_\", \"-\").replace(\".\", \"-\") )", "the file to parse \"\"\" errors = None if path.endswith(\".csar\"): template = handle_csar(path,", "the nodetemplate object for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements)", "the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get", "if error: raise ValidationError(error, \"TOSCA Parser could not parse the ADT...\") return template", "If the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\" error", "requirement in requirements: key = list(requirement)[0] # for shorthand requirement notation, just replace", "\"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks at entity_tpl, so update", "template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using default\") try: return [", "Exception as e: 
error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket", "local or remote path to the file to parse \"\"\" errors = None", "to the saved ADT parsed_params (dict): tosca inputs Raises: ValueError: If the tosca-parser", "error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" )", "Error\\n {e}\" except Exception as e: error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please", "using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using", "or remote path to the file to parse \"\"\" errors = None if", "a ToscaTemplate object Args: path (string): path to the saved ADT parsed_params (dict):", "use that for policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in", "= \"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n HINT: This might be", "import ValidationError as TOSCAParserError from yaml.error import YAMLError from micadoparser import validator from", "at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser could not parse the", "\"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser", "for param in template.inputs if param.name == key ][0] except IndexError: logger.error(f\"Input '{key}'", "not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n", "try: return [ param.default for param in template.inputs if param.name == key ][0]", "= logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a", "of the nodetemplate object for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\")", "ADT and returns a ToscaTemplate :params: path, parsed_params :type: string, 
dictionary :return: template", "_find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path", "x: x is not None, _get_input_value, template, ) # Update nodetemplate properties for", "as e: error = [ line for line in e.message.splitlines() if all([line, not", "given, using default\") try: return [ param.default for param in template.inputs if param.name", "string, dictionary :return: template | parsed_params: dictionary containing the input to change |", "import YAMLError from micadoparser import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import", "'node' in the inner dictionary except AttributeError: requirement[key][\"node\"] = ( requirement[key][\"node\"].replace(\"_\", \"-\").replace(\".\", \"-\")", "file to parse \"\"\" errors = None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params)", "due to a wrong type - check your imports.\" except YAMLError as e:", "None, _get_input_value, template, ) # Update nodetemplate properties for node in template.nodetemplates: node._properties", "# references to renamed nodes exist in targets_list, so use that for policy", "import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import", "= [ line for line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error", "periods from node references\"\"\" for requirement in requirements: key = list(requirement)[0] # for", "error: raise ValidationError(error, \"TOSCA Parser could not parse the ADT...\") return template def", "dictionary :return: template | parsed_params: dictionary containing the input to change | path:", "set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate :params: path, parsed_params :type:", "Parsed template object \"\"\" error = \"\" try: 
template = ToscaTemplate( path=path, parsed_params=parsed_params,", "if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template)", "_get_input_value, template, ) # Update nodetemplate properties for node in template.nodetemplates: node._properties =", "object \"\"\" error = \"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True )", "object Args: path (string): path to the saved ADT parsed_params (dict): tosca inputs", "not ever (I think) used to pull node names # so update the", "try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error", "error = f\"YAML Error\\n {e}\" except Exception as e: error = ( f\"Unknown", "AttributeError as e: error = f\"{e}\\n HINT: This might be due to a", "think) used to pull node names # so update the name property of", "from toscaparser.tosca_template import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import", "# tpl and entity_tpl are not ever (I think) used to pull node", "looks at entity_tpl, so update that # references to renamed nodes exist in", "template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try:", "key and update 'node' in the inner dictionary except AttributeError: requirement[key][\"node\"] = (", "requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get the key and update 'node' in", "if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as e: error", "Raises: ValueError: If the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template object", "try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get 
the key", "has trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\" error = \"\" try:", "_find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then resolve and update\"\"\" resolve_get_functions( template.tpl,", "for policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list ]", "has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from node names and", "Parser could not parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags", "\"\"\"Parses any ADT and returns a ToscaTemplate :params: path, parsed_params :type: string, dictionary", "and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x is not None, _get_input_value, template,", "template.tpl, \"get_input\", lambda x: x is not None, _get_input_value, template, ) # Update", "YAMLError as e: error = f\"YAML Error\\n {e}\" except Exception as e: error", "underscores and periods from node references\"\"\" for requirement in requirements: key = list(requirement)[0]", "get_input resolution using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not", "tosca inputs Raises: ValueError: If the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed", "# the targets property just looks at entity_tpl, so update that # references", "for requirement in requirements: key = list(requirement)[0] # for shorthand requirement notation, just", "nodetemplate object for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) #", "f\"{e}\\n HINT: This might be due to a wrong type - check your", "try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using default\") try:", "error = [ line for line in e.message.splitlines() 
if all([line, not line.startswith(\"\\t\\t\")]) ]", "handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def", "resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x is not None, _get_input_value,", "ToscaTemplate: Parsed template object \"\"\" error = \"\" try: template = ToscaTemplate( path=path,", "resolve_get_functions( template.tpl, \"get_input\", lambda x: x is not None, _get_input_value, template, ) #", "e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as e:", "to parse \"\"\" errors = None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else:", "node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from", "type - check your imports.\" except YAMLError as e: error = f\"YAML Error\\n", "( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get the key and update 'node'", "ToscaTemplate object Args: path (string): path to the saved ADT parsed_params (dict): tosca", "_normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string):", "for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets", "requirement notation, just replace the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\")", "from micadoparser import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from", "handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def 
set_template(path, parsed_params=None):", "for shorthand requirement notation, just replace the string try: requirement[key] = ( requirement[key].replace(\"_\",", "the input to change | path: local or remote path to the file", "periods from node names and refs\"\"\" # tpl and entity_tpl are not ever", "string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get the", "This might be due to a wrong type - check your imports.\" except", "in the template, then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x", "ever (I think) used to pull node names # so update the name", "parsed_params (dict): tosca inputs Raises: ValueError: If the tosca-parser has trouble parsing Returns:", "parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using default\")", "e: error = [ line for line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")])", "ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error = [ line", "not None, _get_input_value, template, ) # Update nodetemplate properties for node in template.nodetemplates:", "resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and", "to the file to parse \"\"\" errors = None if path.endswith(\".csar\"): template =", "policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list ] def", "\"-\").replace(\".\", \"-\") ) # otherwise get the key and update 'node' in the", "pull node names # so update the name property of the nodetemplate object", "path (string): path to the saved ADT parsed_params (dict): tosca inputs Raises: ValueError:", "# so update the name property of the nodetemplate object for node in", "used to pull node names # so update the name property of the", 
"parsed_params :type: string, dictionary :return: template | parsed_params: dictionary containing the input to", "ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then resolve", "just replace the string try: requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) #", "= ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if", ":type: string, dictionary :return: template | parsed_params: dictionary containing the input to change", "import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import YAMLError from", "that for policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list", "requirement[key] = ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get the key and", "and returns a ToscaTemplate :params: path, parsed_params :type: string, dictionary :return: template |", "# otherwise get the key and update 'node' in the inner dictionary except", "for node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution", "def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\" for requirement in requirements:", "def _get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError,", "| path: local or remote path to the file to parse \"\"\" errors", "ADT parsed_params (dict): tosca inputs Raises: ValueError: If the tosca-parser has trouble parsing", "return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then resolve and", ") except TOSCAParserError as e: error = [ line for line in e.message.splitlines()", "parsed_params: 
dictionary containing the input to change | path: local or remote path", "node names and refs\"\"\" # tpl and entity_tpl are not ever (I think)", "ValueError: If the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\"", "remote path to the file to parse \"\"\" errors = None if path.endswith(\".csar\"):", "= handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template", "nodetemplate properties for node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom", "parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path,", ") # otherwise get the key and update 'node' in the inner dictionary", "Args: path (string): path to the saved ADT parsed_params (dict): tosca inputs Raises:", "if param.name == key ][0] except IndexError: logger.error(f\"Input '{key}' has no default\") def", "update that # references to renamed nodes exist in targets_list, so use that", "could not parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in", "parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string): path to the saved ADT", "path to the saved ADT parsed_params (dict): tosca inputs Raises: ValueError: If the", "'{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from node names", "validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args:", "_get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, 
TypeError):", "entity_tpl are not ever (I think) used to pull node names # so", "check your imports.\" except YAMLError as e: error = f\"YAML Error\\n {e}\" except", "targets property just looks at entity_tpl, so update that # references to renamed", "yaml.error import YAMLError from micadoparser import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar", "from yaml.error import YAMLError from micadoparser import validator from micadoparser.exceptions import ValidationError from", "path: local or remote path to the file to parse \"\"\" errors =", "from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses", "from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__)", "a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser could not", "as TOSCAParserError from yaml.error import YAMLError from micadoparser import validator from micadoparser.exceptions import", "the template, then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x: x is", "error = f\"{e}\\n HINT: This might be due to a wrong type -", "names # so update the name property of the nodetemplate object for node", "\"\"\"Find `get_input` tags in the template, then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\",", "template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks", "key ][0] except IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores", "f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at 
https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise", "a ToscaTemplate :params: path, parsed_params :type: string, dictionary :return: template | parsed_params: dictionary", "update 'node' in the inner dictionary except AttributeError: requirement[key][\"node\"] = ( requirement[key][\"node\"].replace(\"_\", \"-\").replace(\".\",", "from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import YAMLError from micadoparser import", "def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template, then resolve and update\"\"\" resolve_get_functions(", "except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using default\") try: return [ param.default", "references to renamed nodes exist in targets_list, so use that for policy in", "in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as", "logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from node", "ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions", "ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA Parser could not parse", "a wrong type - check your imports.\" except YAMLError as e: error =", "parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error = [ line for line", "exist in targets_list, so use that for policy in template.policies: policy.entity_tpl[\"targets\"] = [", "node references\"\"\" for requirement in requirements: key = list(requirement)[0] # for shorthand requirement", "returns a ToscaTemplate :params: path, parsed_params :type: string, dictionary :return: template | parsed_params:", "targets_list, so use that for policy in template.policies: 
policy.entity_tpl[\"targets\"] = [ node.name for", "in requirements: key = list(requirement)[0] # for shorthand requirement notation, just replace the", "ValidationError as TOSCAParserError from yaml.error import YAMLError from micadoparser import validator from micadoparser.exceptions", "line for line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error)", "node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try: return", "][0] except IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and", "logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate", "micadoparser import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml", "import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path,", "and periods from node names and refs\"\"\" # tpl and entity_tpl are not", "and refs\"\"\" # tpl and entity_tpl are not ever (I think) used to", "might be due to a wrong type - check your imports.\" except YAMLError", "ValidationError(error, \"TOSCA Parser could not parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find", "def _normalise_node_names(template): \"\"\"Remove underscores and periods from node names and refs\"\"\" # tpl", "the targets property just looks at entity_tpl, so update that # references to", "requirements: key = list(requirement)[0] # for shorthand requirement notation, just replace the string", "policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\" for requirement", "template | 
parsed_params: dictionary containing the input to change | path: local or", "= [ node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and", "and entity_tpl are not ever (I think) used to pull node names #", "references\"\"\" for requirement in requirements: key = list(requirement)[0] # for shorthand requirement notation,", "= node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks at entity_tpl,", "renamed nodes exist in targets_list, so use that for policy in template.policies: policy.entity_tpl[\"targets\"]", "{e}\" except Exception as e: error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise", "f\"YAML Error\\n {e}\" except Exception as e: error = ( f\"Unknown Error:\\n {e}\\n\\n\"", "node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input resolution using", "inputs Raises: ValueError: If the tosca-parser has trouble parsing Returns: ToscaTemplate: Parsed template", "(KeyError, TypeError): logger.debug(f\"Input '{key}' not given, using default\") try: return [ param.default for", "= None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params)", "get the key and update 'node' in the inner dictionary except AttributeError: requirement[key][\"node\"]", "handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" +", "\"\"\" error = \"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except", "from node references\"\"\" for requirement in requirements: key = list(requirement)[0] # for shorthand", "node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") 
_normalise_requirement_node_refs(node._requirements) # the targets property", "__name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate :params: path,", "policy.entity_tpl[\"targets\"] = [ node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores", "micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any", "error = \"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError", "to a wrong type - check your imports.\" except YAMLError as e: error", "line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n HINT:", "properties for node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template): \"\"\"Custom get_input", "change | path: local or remote path to the file to parse \"\"\"", "error = \"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n HINT: This might", "import logging from toscaparser.tosca_template import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from", "template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error =", "for line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except", "node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks at", "except AttributeError as e: error = f\"{e}\\n HINT: This might be due to", "== key ][0] except IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove", "not given, using default\") try: return [ param.default for param in 
template.inputs if", "line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError", "object for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\", \"-\") _normalise_requirement_node_refs(node._requirements) # the", "to pull node names # so update the name property of the nodetemplate", "\"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e:", "trouble parsing Returns: ToscaTemplate: Parsed template object \"\"\" error = \"\" try: template", "resolution using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}' not given,", "tags in the template, then resolve and update\"\"\" resolve_get_functions( template.tpl, \"get_input\", lambda x:", "return [ param.default for param in template.inputs if param.name == key ][0] except", "Update nodetemplate properties for node in template.nodetemplates: node._properties = node._create_properties() def _get_input_value(key, template):", "for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node", "so use that for policy in template.policies: policy.entity_tpl[\"targets\"] = [ node.name for node", "\"\"\"Remove underscores and periods from node references\"\"\" for requirement in requirements: key =", "IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template): \"\"\"Remove underscores and periods from", "Error:\\n {e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error,", "TOSCAParserError from yaml.error import YAMLError from micadoparser import validator from micadoparser.exceptions import ValidationError", "return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input 
'{key}' not given, using default\") try: return", "] error = \"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n HINT: This", "input to change | path: local or remote path to the file to", "parsing Returns: ToscaTemplate: Parsed template object \"\"\" error = \"\" try: template =", "validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml", "except TOSCAParserError as e: error = [ line for line in e.message.splitlines() if", "node names # so update the name property of the nodetemplate object for", "\"-\") _normalise_requirement_node_refs(node._requirements) # the targets property just looks at entity_tpl, so update that", "else: template = handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params):", "\"\\n\".join(error) except AttributeError as e: error = f\"{e}\\n HINT: This might be due", "(string): path to the saved ADT parsed_params (dict): tosca inputs Raises: ValueError: If", ") # Update nodetemplate properties for node in template.nodetemplates: node._properties = node._create_properties() def", "property of the nodetemplate object for node in template.nodetemplates: node.name = node.name.replace(\"_\", \"-\").replace(\".\",", "refs\"\"\" # tpl and entity_tpl are not ever (I think) used to pull", "nodes exist in targets_list, so use that for policy in template.policies: policy.entity_tpl[\"targets\"] =", "key = list(requirement)[0] # for shorthand requirement notation, just replace the string try:", "parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate :params: path, parsed_params :type: string,", "logging from toscaparser.tosca_template import ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error", "and periods from node references\"\"\" for requirement in 
requirements: key = list(requirement)[0] #", "( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error:", "return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object Args: path (string): path", "= ( requirement[key].replace(\"_\", \"-\").replace(\".\", \"-\") ) # otherwise get the key and update", "parse \"\"\" errors = None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template", "wrong type - check your imports.\" except YAMLError as e: error = f\"YAML", "is not None, _get_input_value, template, ) # Update nodetemplate properties for node in", "logger.debug(f\"Input '{key}' not given, using default\") try: return [ param.default for param in", "ToscaTemplate from toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import YAMLError from micadoparser", "so update the name property of the nodetemplate object for node in template.nodetemplates:", "toscaparser.common.exception import ValidationError as TOSCAParserError from yaml.error import YAMLError from micadoparser import validator", "except Exception as e: error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a", "ToscaTemplate :params: path, parsed_params :type: string, dictionary :return: template | parsed_params: dictionary containing", "that # references to renamed nodes exist in targets_list, so use that for", "dictionary containing the input to change | path: local or remote path to", "def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns a ToscaTemplate :params: path, parsed_params", "import validator from micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import", "your imports.\" except YAMLError as e: error = f\"YAML Error\\n {e}\" except Exception", "template, then resolve and update\"\"\" resolve_get_functions( template.tpl, 
\"get_input\", lambda x: x is not", "(dict): tosca inputs Raises: ValueError: If the tosca-parser has trouble parsing Returns: ToscaTemplate:", "micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger = logging.getLogger(\"micadoparser.\" + __name__) def", "= handle_yaml(path, parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a", ":params: path, parsed_params :type: string, dictionary :return: template | parsed_params: dictionary containing the", "logger = logging.getLogger(\"micadoparser.\" + __name__) def set_template(path, parsed_params=None): \"\"\"Parses any ADT and returns", "underscores and periods from node names and refs\"\"\" # tpl and entity_tpl are", "micadoparser.exceptions import ValidationError from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils", "parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input` tags in the template,", "otherwise get the key and update 'node' in the inner dictionary except AttributeError:", "e: error = ( f\"Unknown Error:\\n {e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\"", "template): \"\"\"Custom get_input resolution using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input", "to change | path: local or remote path to the file to parse", "= \"\" try: template = ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as", "at entity_tpl, so update that # references to renamed nodes exist in targets_list,", "the saved ADT parsed_params (dict): tosca inputs Raises: ValueError: If the tosca-parser has", "raise ValidationError(error, \"TOSCA Parser could not parse the ADT...\") return template def 
_find_other_inputs(template):", "e: error = f\"YAML Error\\n {e}\" except Exception as e: error = (", "parsed_params) validator.validation(template) _find_other_inputs(template) _normalise_node_names(template) return template def get_template(path, parsed_params): \"\"\"Return a ToscaTemplate object", "= ToscaTemplate( path=path, parsed_params=parsed_params, a_file=True ) except TOSCAParserError as e: error = [", "\"\"\"Remove underscores and periods from node names and refs\"\"\" # tpl and entity_tpl", "imports.\" except YAMLError as e: error = f\"YAML Error\\n {e}\" except Exception as", "_normalise_node_names(template): \"\"\"Remove underscores and periods from node names and refs\"\"\" # tpl and", "param.name == key ][0] except IndexError: logger.error(f\"Input '{key}' has no default\") def _normalise_node_names(template):", "[ line for line in e.message.splitlines() if all([line, not line.startswith(\"\\t\\t\")]) ] error =", "= f\"YAML Error\\n {e}\" except Exception as e: error = ( f\"Unknown Error:\\n", "\"\"\" errors = None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template =", "(I think) used to pull node names # so update the name property", "x is not None, _get_input_value, template, ) # Update nodetemplate properties for node", "name property of the nodetemplate object for node in template.nodetemplates: node.name = node.name.replace(\"_\",", "path, parsed_params :type: string, dictionary :return: template | parsed_params: dictionary containing the input", "{e}\\n\\n\" \"Please raise a ticket at https://github.com/micado-scale/ansible-micado/issues.\" ) if error: raise ValidationError(error, \"TOSCA", "from micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger", "\"TOSCA Parser could not parse the ADT...\") return template def _find_other_inputs(template): \"\"\"Find `get_input`", "\"\"\"Custom get_input resolution 
using parsed_params\"\"\" try: return template.parsed_params[key] except (KeyError, TypeError): logger.debug(f\"Input '{key}'", "node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods from node references\"\"\"", "be due to a wrong type - check your imports.\" except YAMLError as", "[ node.name for node in policy.targets_list ] def _normalise_requirement_node_refs(requirements): \"\"\"Remove underscores and periods", "\"\"\"Return a ToscaTemplate object Args: path (string): path to the saved ADT parsed_params", "all([line, not line.startswith(\"\\t\\t\")]) ] error = \"\\n\".join(error) except AttributeError as e: error =", "micadoparser.utils.csar import handle_csar from micadoparser.utils.yaml import handle_yaml from micadoparser.utils.utils import resolve_get_functions logger =", "None if path.endswith(\".csar\"): template = handle_csar(path, parsed_params) else: template = handle_yaml(path, parsed_params) validator.validation(template)" ]
[ "id = Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) score", "class to keep track of community giveaways class Giveaway(Base): __tablename__ = 'giveaway' id", "= False) image_url = Column(String, nullable = True) position = Column(Integer, nullable =", "class EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key = True) token =", "'message' id = Column(Integer, primary_key = True) token = Column(String, nullable = False)", "Base = declarative_base() # Abuser class to insert people abusing the bugreporting feature", "Table column definitions id = Column(Integer, primary_key = True) discord_id = Column(String, nullable", "= True) position = Column(Integer, nullable = False) # Giveaway class to keep", "sqlalchemy import create_engine Base = declarative_base() # Abuser class to insert people abusing", "definitions id = Column(Integer, primary_key = True) discord_id = Column(String, nullable = False)", "engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables (if none exist etc.)", "column definitions id = Column(Integer, primary_key = True) discord_id = Column(String, nullable =", "Entry(Base): __tablename__ = 'entries' # Table column definitions id = Column(Integer, primary_key =", "class Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key = True) discord_id =", "Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key = True) discord_id = Column(String,", "= False) # Entry class for voice-related giveaway entries class Entry(Base): __tablename__ =", "= True) discord_id = Column(String, nullable = False) # Entry class for voice-related", "track of community giveaways class Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key", "discord_id = Column(String, nullable = False) # Create the engine to the sqlite", "nullable = False) # Create the engine to the sqlite database engine =", "id = Column(Integer, 
primary_key = True) discord_id = Column(String, nullable = False) #", "Entry class for voice-related giveaway entries class Entry(Base): __tablename__ = 'entries' # Table", "= False) content = Column(String, nullable = False) image_url = Column(String, nullable =", "to insert people abusing the bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id", "EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key = True) token = Column(String,", "image_url = Column(String, nullable = True) position = Column(Integer, nullable = False) #", "= True) token = Column(String, nullable = False) content = Column(String, nullable =", "'entries' # Table column definitions id = Column(Integer, primary_key = True) discord_id =", "declarative_base from sqlalchemy import create_engine Base = declarative_base() # Abuser class to insert", "Column(String, nullable = False) image_url = Column(String, nullable = True) position = Column(Integer,", "from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base = declarative_base() # Abuser", "content = Column(String, nullable = False) image_url = Column(String, nullable = True) position", "class for voice-related giveaway entries class Entry(Base): __tablename__ = 'entries' # Table column", "class Entry(Base): __tablename__ = 'entries' # Table column definitions id = Column(Integer, primary_key", "Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine", "False) # Entry class for voice-related giveaway entries class Entry(Base): __tablename__ = 'entries'", "giveaways class Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key = True) discord_id", "primary_key = True) discord_id = Column(String, nullable = False) # Create the engine", "False) score = Column(Integer, nullable = False) # EventMessage class for stuff_happening messages", "community giveaways class 
Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key = True)", "from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from", "voice-related giveaway entries class Entry(Base): __tablename__ = 'entries' # Table column definitions id", "id = Column(Integer, primary_key = True) token = Column(String, nullable = False) content", "nullable = False) content = Column(String, nullable = False) image_url = Column(String, nullable", "keep track of community giveaways class Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer,", "Column(String, nullable = False) # Create the engine to the sqlite database engine", "of community giveaways class Giveaway(Base): __tablename__ = 'giveaway' id = Column(Integer, primary_key =", "import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import", "Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) # Create the", "messages class EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key = True) token", "people abusing the bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer,", "from sqlalchemy import create_engine Base = declarative_base() # Abuser class to insert people", "= 'message' id = Column(Integer, primary_key = True) token = Column(String, nullable =", "position = Column(Integer, nullable = False) # Giveaway class to keep track of", "# Abuser class to insert people abusing the bugreporting feature class Abuser(Base): __tablename__", "import os import sys from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from", "for voice-related giveaway entries class Entry(Base): __tablename__ = 'entries' # Table column definitions", "True) discord_id = Column(String, nullable = False) # Create the engine to the", "= Column(Integer, nullable = False) # Giveaway class to keep 
track of community", "= Column(String, nullable = False) score = Column(Integer, nullable = False) # EventMessage", "primary_key = True) discord_id = Column(String, nullable = False) score = Column(Integer, nullable", "database engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables (if none exist", "EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id = Column(Integer,", "Create the engine to the sqlite database engine = create_engine('sqlite:///database/database.sqlite') # Handles the", "giveaway entries class Entry(Base): __tablename__ = 'entries' # Table column definitions id =", "class to insert people abusing the bugreporting feature class Abuser(Base): __tablename__ = 'abusers'", "= 'entries' # Table column definitions id = Column(Integer, primary_key = True) discord_id", "primary_key = True) discord_id = Column(String, nullable = False) # Entry class for", "to keep track of community giveaways class Giveaway(Base): __tablename__ = 'giveaway' id =", "False) # Create the engine to the sqlite database engine = create_engine('sqlite:///database/database.sqlite') #", "nullable = True) position = Column(Integer, nullable = False) # Giveaway class to", "sqlite database engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables (if none", "= True) discord_id = Column(String, nullable = False) # Create the engine to", "declarative_base() # Abuser class to insert people abusing the bugreporting feature class Abuser(Base):", "String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base = declarative_base()", "Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base =", "Column(String, nullable = True) position = Column(Integer, nullable = False) # Giveaway class", "import declarative_base from sqlalchemy import create_engine Base 
= declarative_base() # Abuser class to", "entries class Entry(Base): __tablename__ = 'entries' # Table column definitions id = Column(Integer,", "Column(Integer, primary_key = True) token = Column(String, nullable = False) content = Column(String,", "= Column(Integer, nullable = False) # EventMessage class for stuff_happening messages class EventMessage(Base):", "= Column(String, nullable = False) image_url = Column(String, nullable = True) position =", "<reponame>Anve94/DiscordBot-public import os import sys from sqlalchemy import Column, ForeignKey, Integer, String, Boolean", "= Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) # Entry", "Column(Integer, nullable = False) # Giveaway class to keep track of community giveaways", "Giveaway class to keep track of community giveaways class Giveaway(Base): __tablename__ = 'giveaway'", "True) token = Column(String, nullable = False) content = Column(String, nullable = False)", "# Table column definitions id = Column(Integer, primary_key = True) discord_id = Column(String,", "= Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) # Create", "for stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key =", "= False) score = Column(Integer, nullable = False) # EventMessage class for stuff_happening", "import create_engine Base = declarative_base() # Abuser class to insert people abusing the", "# Entry class for voice-related giveaway entries class Entry(Base): __tablename__ = 'entries' #", "'giveaway' id = Column(Integer, primary_key = True) discord_id = Column(String, nullable = False)", "# Create the engine to the sqlite database engine = create_engine('sqlite:///database/database.sqlite') # Handles", "sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy", "= 'giveaway' id = Column(Integer, primary_key = True) discord_id = 
Column(String, nullable =", "'abusers' id = Column(Integer, primary_key = True) discord_id = Column(String, nullable = False)", "Column(Integer, nullable = False) # EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__", "False) # EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id", "discord_id = Column(String, nullable = False) # Entry class for voice-related giveaway entries", "nullable = False) # Entry class for voice-related giveaway entries class Entry(Base): __tablename__", "score = Column(Integer, nullable = False) # EventMessage class for stuff_happening messages class", "__tablename__ = 'abusers' id = Column(Integer, primary_key = True) discord_id = Column(String, nullable", "= Column(Integer, primary_key = True) token = Column(String, nullable = False) content =", "False) image_url = Column(String, nullable = True) position = Column(Integer, nullable = False)", "to the sqlite database engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables", "nullable = False) # Giveaway class to keep track of community giveaways class", "nullable = False) # EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__ =", "= 'abusers' id = Column(Integer, primary_key = True) discord_id = Column(String, nullable =", "the engine to the sqlite database engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation", "bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key = True)", "Column(String, nullable = False) score = Column(Integer, nullable = False) # EventMessage class", "Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) # Entry class", "True) discord_id = Column(String, nullable = False) # Entry class for voice-related giveaway", "= Column(String, nullable = False) # Create the engine to the sqlite database", "the sqlite 
database engine = create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables (if", "= True) discord_id = Column(String, nullable = False) score = Column(Integer, nullable =", "ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base", "__tablename__ = 'giveaway' id = Column(Integer, primary_key = True) discord_id = Column(String, nullable", "discord_id = Column(String, nullable = False) score = Column(Integer, nullable = False) #", "the bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key =", "= Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) score =", "Column(Integer, primary_key = True) discord_id = Column(String, nullable = False) score = Column(Integer,", "= Column(String, nullable = False) # Entry class for voice-related giveaway entries class", "# EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id =", "Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key = True) discord_id = Column(String,", "Boolean from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base = declarative_base() #", "sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine Base = declarative_base() # Abuser class", "create_engine Base = declarative_base() # Abuser class to insert people abusing the bugreporting", "sys from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import declarative_base", "True) position = Column(Integer, nullable = False) # Giveaway class to keep track", "feature class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key = True) discord_id", "import sys from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative import", "insert people 
abusing the bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id =", "True) discord_id = Column(String, nullable = False) score = Column(Integer, nullable = False)", "= create_engine('sqlite:///database/database.sqlite') # Handles the creation of tables (if none exist etc.) Base.metadata.create_all(engine)", "primary_key = True) token = Column(String, nullable = False) content = Column(String, nullable", "__tablename__ = 'message' id = Column(Integer, primary_key = True) token = Column(String, nullable", "= Column(String, nullable = False) content = Column(String, nullable = False) image_url =", "Abuser class to insert people abusing the bugreporting feature class Abuser(Base): __tablename__ =", "os import sys from sqlalchemy import Column, ForeignKey, Integer, String, Boolean from sqlalchemy.ext.declarative", "class for stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key", "= False) # Giveaway class to keep track of community giveaways class Giveaway(Base):", "# Giveaway class to keep track of community giveaways class Giveaway(Base): __tablename__ =", "Column(String, nullable = False) content = Column(String, nullable = False) image_url = Column(String,", "nullable = False) image_url = Column(String, nullable = True) position = Column(Integer, nullable", "token = Column(String, nullable = False) content = Column(String, nullable = False) image_url", "nullable = False) score = Column(Integer, nullable = False) # EventMessage class for", "False) content = Column(String, nullable = False) image_url = Column(String, nullable = True)", "class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key = True) discord_id =", "abusing the bugreporting feature class Abuser(Base): __tablename__ = 'abusers' id = Column(Integer, primary_key", "__tablename__ = 'entries' # Table column definitions id = Column(Integer, primary_key = True)", "engine to the sqlite database engine = 
create_engine('sqlite:///database/database.sqlite') # Handles the creation of", "= False) # Create the engine to the sqlite database engine = create_engine('sqlite:///database/database.sqlite')", "= False) # EventMessage class for stuff_happening messages class EventMessage(Base): __tablename__ = 'message'", "= declarative_base() # Abuser class to insert people abusing the bugreporting feature class", "stuff_happening messages class EventMessage(Base): __tablename__ = 'message' id = Column(Integer, primary_key = True)", "= Column(String, nullable = True) position = Column(Integer, nullable = False) # Giveaway", "False) # Giveaway class to keep track of community giveaways class Giveaway(Base): __tablename__", "Column(String, nullable = False) # Entry class for voice-related giveaway entries class Entry(Base):" ]
[ "dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState()", "infile != None: self.read(infile) def read(self, infile): \"\"\" Read probability definition from a", "lm, n, sidtab): \"\"\" Add states and arcs for all N-grams in the", "is a non-event if m == 0: # The destination state will be", "the classes in a Sphinx probability definition file. This transducer maps words to", "== None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\")", "openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll =", "class language models. \"\"\" def __init__(self, infile=None): self.classes = {} if infile !=", "prev)) # Use a single symbol for end-of-sentence if w == '</s>': w", "w(1,M): # Create a state q(1,M) # Create an arc from state q(1,M-1)", "label, label, 0, st) for c in probdef.classes: clabel = symtab.AddSymbol(c) for word,", "table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst =", "tempfile import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which", "== \"END\" and parts[1] == classname: inclass = None else: prob = 1.0", "sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts", "class in this probability definition. \"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\"", "probability definition to a file. \"\"\" if not isinstance(outfile, file): outfile = file(outfile)", "= \"$Revision $\" import sys import os import tempfile import openfst import sphinxbase", "# o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value() st = a.nextstate", "from a class-based language model. 
By default this returns the lazy composition of", "arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition", "prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a single symbol", "self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def", "mg.words[0] == '</s>': continue # skip >1-grams starting with </s> if m ==", "symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue if ng.words[n-1] == '<s>': #", "# backoff state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue #", "hope) for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label)", "\"\"\" Probability definition file used for Sphinx class language models. \"\"\" def __init__(self,", "a.nextstate return ll def lm_eval(lm, sent): sent = [x for x in sent.split()", "to its input to obtain an equivalent non-class-based model. \"\"\" if not isinstance(probdef,", "== 2 \\ and parts[0] == \"END\" and parts[1] == classname: inclass =", "sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest", "to a file. \"\"\" if not isinstance(outfile, file): outfile = file(outfile) for c", "the suffix M-1-gram # Note taht if mg.log_bowt == 0 it's particularly important", "write(self, outfile): \"\"\" Write out probability definition to a file. 
\"\"\" if not", "wsym in outsym: if wsym == 0: continue # Use a single symbol", "and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability", "one final state is allowed final = True newstate = False if ('</s>',)", "= self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms)", "m == 0: src = 0 # 1-grams start in backoff state elif", "== '__main__': lmf, fstf = sys.argv[1:] lm = sphinxbase.NGramModel(lmf) fst = build_lmfst(lm) fst.Write(fstf)", "outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word,", "osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms", "fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst =", "to q(2,N) with weight P(w(1,N)) # Table holding M-gram to state mappings sidtab", "in symtab: if label == openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for", "__version__ = \"$Revision $\" import sys import os import tempfile import openfst import", "= txt.split() for c in txt: if omitstart and c == '<s>': continue", ">= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight,", "wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this: # #", "probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab = isyms else: symtab", "= os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile))", "= fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in", "if isyms: 
symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState()", "dest #print \"Final state\", dest #print \"Entered state ID mapping (</s>,) =>\", dest", "dest #print \"Entered state ID mapping (</s>,) =>\", dest else: final = False", "= self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int):", "with the input, or pre-composed with the language model. In the latter case", "\"\"\" Convert a text string to an FST. \"\"\" fst = fstclass() start", "means it has an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>':", "symtab, lm, n, sidtab): \"\"\" Add states and arcs for all N-grams in", "isym, 0, 0, nxt) prev = nxt nxt = fst.AddState() osym = osyms.AddSymbol(s)", "goes like this: # # Create a backoff state # For M in", "isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt nxt =", "= ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest): if", "__name__ == '__main__': lmf, fstf = sys.argv[1:] lm = sphinxbase.NGramModel(lmf) fst = build_lmfst(lm)", "self.read(infile) def read(self, infile): \"\"\" Read probability definition from a file. \"\"\" if", "= fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab: if label ==", "Convert a text string to an FST. 
\"\"\" fst = fstclass() start =", "%s\\n\" % c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob))", "== None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\")", "bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort the resulting FST", "0, 0, next)) prev = next # And an epsilon arc to the", "0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an", "ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState())", "wsym == -1: # OOV continue if ng.words[n-1] == '<s>': # non-event continue", "in the language model, where M<N. \"\"\" for mg in lm.mgrams(m): wsym =", "= fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else: symtab", "arc to the first state of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0,", "(word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. \"\"\"", "is). \"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym == -1:", "% (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True):", "and output symbols as required. 
This is meant to behave somewhat like the", "import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\")", "= sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final", "\"\"\" Build an FST from the classes in a Sphinx probability definition file.", "= fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt", "VectorFst from it and project it to its input. \"\"\" lmfst = build_lmfst(lm,", "of words, or a string of whitespace-separated tokens, to a sentence FST. \"\"\"", "os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile)", "Create a backoff arc to the suffix M-1-gram # Note taht if mg.log_bowt", "= symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this: # # Create", "# Create a backoff state # For M in 1 to N-1: #", "strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for a set of strings.", "a set of strings. \"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms =", "$\" import sys import os import tempfile import openfst import sphinxbase import subprocess", "= None for spam in infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'):", "only one final state is allowed final = True newstate = False if", "an FST as a PDF using fstdraw and dot. 
\"\"\" tempdir = tempfile.mkdtemp()", "weight, dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int):", "Add a new state to the mapping if needed sidtab[tuple(mg.words)] = dest #print", "w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that", "P(w(1,M)) # Create an arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1))", "(</s>,) =>\", dest else: final = False newstate = True dest = fst.AddState()", "= openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms", "fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST", "c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. \"\"\" for c in self.classes: t", "= openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\")", "dest = fst.AddState() if mg.words[m] == '<s>': # <s> is a non-event if", "state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print", "(acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\"", "ending in OOV if m > 0 and mg.words[0] == '</s>': continue #", "from a file. 
\"\"\" if not isinstance(infile, file): infile = file(infile) inclass =", "from state q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table holding M-gram to", "not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src)", "= insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next", "#print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram", "language model, where N=N (the order of the model, that is). \"\"\" for", "isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self,", "SphinxProbdef(object): \"\"\" Probability definition file used for Sphinx class language models. \"\"\" def", "fst class SphinxProbdef(object): \"\"\" Probability definition file used for Sphinx class language models.", "\"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if not final: # Create a", "if mg.words[m] == '<s>': # <s> is a non-event if m == 0:", "else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d => %d", "%s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. \"\"\" for c in", "file. \"\"\" if not isinstance(outfile, file): outfile = file(outfile) for c in self.classes:", "fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from a", "This is meant to behave somewhat like the Dot language. \"\"\" def __init__(self,", "meant to behave somewhat like the Dot language. \"\"\" def __init__(self, isyms=None, osyms=None,", "Create an arc from state q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table", "definition file used for Sphinx class language models. 
\"\"\" def __init__(self, infile=None): self.classes", "sym = isyms.Find(c) if sym == -1: #print \"Warning, unknown word\", c continue", "to state mappings sidtab = {} fst.AddState() # guaranteed to be zero (we", "= fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\", dest #print \"Entered", "M<N. \"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1:", "isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if", "symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst)", "The destination state will be the initial state fst.SetStart(dest) #print \"Initial state\", dest", "= symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm,", "= fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\")", "not in sidtab: spos += 1 if spos == n: raise RuntimeError, \"Unable", "phi else: bo_label = epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab)", "'<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll +=", "== -1: # OOV continue if ng.words[n-1] == '<s>': # non-event continue if", "case you can project the resulting transducer to its input to obtain an", "os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of", "self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass =", "c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\"", "continue # Not a 1-gram, no suffix 
M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state))", "arc from state q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table holding M-gram", "= epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The", "t = sum(self.classes[c].itervalues()) if t != 0: for w in self.classes[c]: self.classes[c][w] /=", "= insym.AddSymbol(c) for w, wsym in outsym: if wsym == 0: continue wsym", "== 0: bo_state = 0 # backoff state elif tuple(mg.words[1:]) in sidtab: bo_state", "state elif tuple(mg.words[0:m]) not in sidtab: continue # this means it has an", "str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an FST. \"\"\" fst =", "= fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst", "to classes, and can either be composed with the input, or pre-composed with", "sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1:", "[x for x in sent.split() if not x.startswith('++')] ll = 0 for i", "arcs for all N-grams in the language model, where N=N (the order of", "model, where N=N (the order of the model, that is). 
\"\"\" for ng", "None for spam in infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue", "== -1: #print \"Warning, unknown word\", c continue else: sym = symtab.AddSymbol(c) #print", "a single symbol for end-of-sentence if w == '</s>': w = [w,] for", "# this means it has an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m]", "def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi", "0) for w, wsym in outsym: if wsym == 0: continue # Use", "0: src = 0 # 1-grams start in backoff state elif tuple(mg.words[0:m]) not", "Convert a list of words, or a string of whitespace-separated tokens, to a", "dest else: final = False newstate = True dest = fst.AddState() if mg.words[m]", "phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst,", "SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab,", "prev = start for c in s: nxt = fst.AddState() isym = isyms.AddSymbol(c)", "Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if", "c in txt: if omitstart and c == '<s>': continue nxt = fst.AddState()", "#print \"Adding backoff arc %d => %d %.4f\" % (dest, bo_state, -mg.log_bowt) def", "in this probability definition. \"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\" Write", "= None else: prob = 1.0 if len(parts) > 1: prob = float(parts[1])", "= isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str):", "model. 
By default this returns the lazy composition of the class definition transducer", "Add an epsilon:word arc to the first state of this word prev =", "-mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states and arcs for", "a PDF using fstdraw and dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir,", "<<EMAIL>>\" __version__ = \"$Revision $\" import sys import os import tempfile import openfst", "osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest,", "!= -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT,", "sys import os import tempfile import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst):", "an epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym)", "this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a", "'</s>': continue # skip >1-grams starting with </s> if m == 0: src", "st) for c in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems():", "isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if", "not in sidtab: continue # this means it has an OOV else: src", "to the first state of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym,", "= openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms =", "arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return", "this probability definition. 
\"\"\" self.classes[name] = {} def add_class_word(self, name, word, prob): \"\"\"", "\\ and parts[0] == \"END\" and parts[1] == classname: inclass = None else:", "prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll += prob return", "wsym == 0: continue # Use a single symbol for end-of-sentence if w", "lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll = 0", "=>\", dest else: final = False newstate = True dest = fst.AddState() if", "if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest =", "def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for a set of", "lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1: continue # skip mgrams ending", "state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile,", "isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if", "self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build an FST from the classes", "fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt nxt = fst.AddState() osym =", "from an N-gram backoff language model. \"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\")", "to obtain an equivalent non-class-based model. \"\"\" if not isinstance(probdef, SphinxProbdef): probdef =", "start if isinstance(txt, str): txt = txt.split() for c in txt: if omitstart", "transducer to its input to obtain an equivalent non-class-based model. 
\"\"\" if not", "# skip >1-grams starting with </s> if m == 0: src = 0", "= sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label,", "Create an arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For", "w == '</s>': w = [w,] for c in w: csym = insym.Find(c)", "st != -1 and o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\", "'<s>': # <s> is a non-event if m == 0: # The destination", "openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt = txt.split() for c", "language model. By default this returns the lazy composition of the class definition", "isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None:", "= next # And an epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0,", "self.classes[name] = {} def add_class_word(self, name, word, prob): \"\"\" Add a word to", "freely under the same terms as # Sphinx-III \"\"\" FST utility functions \"\"\"", "not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab =", "ll def lm_eval(lm, sent): sent = [x for x in sent.split() if not", "isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src,", "spos == n: raise RuntimeError, \"Unable to find suffix N-gram for\", ng.wids dest", "lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst)", "in w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next))", "from state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For each N-gram w(1,N):", "openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition file used 
for Sphinx class", "symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon for ug in lm.mgrams(0): wsym", "add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states and arcs for all N-grams", "and arcs for all M-grams in the language model, where M<N. \"\"\" for", "\"\"\" def __init__(self, infile=None): self.classes = {} if infile != None: self.read(infile) def", "not final: # Create a backoff arc to the suffix M-1-gram # Note", "# Create an arc from state q(1,N-1) to q(2,N) with weight P(w(1,N)) #", "N-grams in the language model, where N=N (the order of the model, that", "0, nxt) prev = nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0,", "this freely under the same terms as # Sphinx-III \"\"\" FST utility functions", "prev = start if isinstance(txt, str): txt = txt.split() for c in txt:", "a file. \"\"\" if not isinstance(outfile, file): outfile = file(outfile) for c in", "\"\": continue if inclass: parts = spam.split() if len(parts) == 2 \\ and", "ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue", "project it to its input. 
\"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef,", "epsilon:word arc to the first state of this word prev = fst.AddState() fst.AddArc(start,", "1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) #", "fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst)", "return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on the", "and c == '<s>': continue nxt = fst.AddState() if isyms: sym = isyms.Find(c)", "with weight bowt(w(1,M-1)) # For each N-gram w(1,N): # Create an arc from", "osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass()", "# Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object):", "0, 0, nxt) prev = nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev,", "fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a single symbol for end-of-sentence if", "= {} fst.AddState() # guaranteed to be zero (we hope) for m in", "\"$Revision $\" import sys import os import tempfile import openfst import sphinxbase import", "nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a", "fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an FST. 
\"\"\"", "src = 0 # 1-grams start in backoff state elif tuple(mg.words[0:m]) not in", "dest, #mg.words[m], -mg.log_prob) if newstate: # Add a new state to the mapping", "= sum(self.classes[c].itervalues()) if t != 0: for w in self.classes[c]: self.classes[c][w] /= t", "omitstart and c == '<s>': continue nxt = fst.AddState() if isyms: sym =", "== '<s>': # <s> is a non-event if m == 0: # The", "symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label =", "continue # skip >1-grams starting with </s> if m == 0: src =", "use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\"", "0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math", "not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest)", "from the classes in a Sphinx probability definition file. This transducer maps words", "of the class definition transducer and the language model. To obtain the full", "def add_class_word(self, name, word, prob): \"\"\" Add a word to a class in", "in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return", "wsym == 0: continue wsym = outsym.Find(w) # Add an epsilon:word arc to", "FST class which automatically adds states, input and output symbols as required. 
This", "'<s>': continue nxt = fst.AddState() if isyms: sym = isyms.Find(c) if sym ==", "| dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv", "= fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt", "c in self.classes: t = sum(self.classes[c].itervalues()) if t != 0: for w in", "as required. This is meant to behave somewhat like the Dot language. \"\"\"", "\"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi", "if acceptor: acceptor = \"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw %s", "ll += prob return ll if __name__ == '__main__': lmf, fstf = sys.argv[1:]", "be composed with the input, or pre-composed with the language model. In the", "of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use", "-Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt,", "lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on the symbol table", "c in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0,", "state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a", "def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as a PDF using fstdraw", "symtab.Find(mg.words[m]) if wsym == -1: continue # skip mgrams ending in OOV if", "q(1,M) # Create an arc from state q(1,M-1) to q(1,M) with weight P(w(1,M))", "string of whitespace-separated tokens, to a sentence FST. 
\"\"\" fst = fstclass() start", "('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)]", "state to the mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID", "fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next # And an epsilon arc", "N-1: # For each M-gram w(1,M): # Create a state q(1,M) # Create", "bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states and arcs", "-a.weight.Value() / math.log(10) ll -= a.weight.Value() st = a.nextstate return ll def lm_eval(lm,", "= spam.split() if len(parts) == 2 \\ and parts[0] == \"END\" and parts[1]", "= [x for x in sent.split() if not x.startswith('++')] ll = 0 for", "as a PDF using fstdraw and dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile =", "terms as # Sphinx-III \"\"\" FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\"", "fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an", "symbol for end-of-sentence if w == '</s>': w = [w,] for c in", "nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev =", "*args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not", "acceptor = \"\" rv = os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\"", "sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst", "= 1 while tuple(ng.words[spos:]) not in sidtab: spos += 1 if spos ==", "use_phi=False): \"\"\" Build an FST from a class-based language model. 
By default this", "= self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self,", "lm_eval(lm, sent): sent = [x for x in sent.split() if not x.startswith('++')] ll", "dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym", "FST from the classes in a Sphinx probability definition file. This transducer maps", "m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort the", "obtain the full language model, create a VectorFst from it and project it", "FST recognizer from an N-gram backoff language model. \"\"\" fst = openfst.StdVectorFst() symtab", "model. To obtain the full language model, create a VectorFst from it and", "dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from an", "<reponame>Ohara124c41/TUB-MSc_Thesis<filename>speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/fstutils.py #!/usr/bin/env python # Copyright (c) 2010 Carnegie Mellon University # # You", "fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False):", "dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def", "Build an FST recognizer from an N-gram backoff language model. \"\"\" fst =", "FST based on the symbol table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\")", "ng.words[n-1] == '<s>': # non-event continue if '</s>' in ng.words[0:n-1]: continue for w", "infile): \"\"\" Read probability definition from a file. 
\"\"\" if not isinstance(infile, file):", "with </s> if m == 0: src = 0 # 1-grams start in", "!= -1 and o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ #", "This transducer maps words to classes, and can either be composed with the", "os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor = \"\" rv", "lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start", "== 0: # The destination state will be the initial state fst.SetStart(dest) #print", "of the model, that is). \"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1])", "else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word,", "symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt", "skip mgrams ending in OOV if m > 0 and mg.words[0] == '</s>':", "osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms)", "you can project the resulting transducer to its input to obtain an equivalent", "openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms ==", "ll if __name__ == '__main__': lmf, fstf = sys.argv[1:] lm = sphinxbase.NGramModel(lmf) fst", "self.classes[name][word] = prob def write(self, outfile): \"\"\" Write out probability definition to a", "if sym == -1: #print \"Warning, unknown word\", c continue else: sym =", "osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in strs:", "continue else: sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym, 0,", "else: continue # Not 
a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt,", "tuple(ng.words[spos:]) not in sidtab: spos += 1 if spos == n: raise RuntimeError,", "== 0 it's particularly important to do this! if m == 0: bo_state", "\"\"\" Build a dictionary lookup FST for a set of strings. \"\"\" fst", "= isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt nxt = fst.AddState()", "symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and", "taht if mg.log_bowt == 0 it's particularly important to do this! if m", "prev = start for c in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c)", "self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\"", "transducer maps words to classes, and can either be composed with the input,", "spam in infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam", "definition transducer and the language model. To obtain the full language model, create", "in 1 to N-1: # For each M-gram w(1,M): # Create a state", "needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if", "or spam.startswith(';'): continue if spam == \"\": continue if inclass: parts = spam.split()", "next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next # And", "somewhat like the Dot language. \"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if", "st = a.nextstate return ll def lm_eval(lm, sent): sent = [x for x", "states and arcs for all M-grams in the language model, where M<N. \"\"\"", "Note taht if mg.log_bowt == 0 it's particularly important to do this! 
if", "osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0):", "fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an FST. \"\"\" fst = fstclass()", "\"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists", "#print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if not final: # Create", "def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0):", "from it and project it to its input. \"\"\" lmfst = build_lmfst(lm, use_phi)", "University # # You may copy and modify this freely under the same", "if label == openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for c in", "openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else:", "def normalize(self): \"\"\" Normalize probabilities. \"\"\" for c in self.classes: t = sum(self.classes[c].itervalues())", "osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms =", "(c) 2010 Carnegie Mellon University # # You may copy and modify this", "the model, that is). \"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if", "# Copyright (c) 2010 Carnegie Mellon University # # You may copy and", "to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst", "= dest #print \"Final state\", dest #print \"Entered state ID mapping (</s>,) =>\",", "0 # 1-grams start in backoff state elif tuple(mg.words[0:m]) not in sidtab: continue", "outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. 
\"\"\" for c in self.classes: t =", "#print \"Added %d-gram arc %d => %d %s/%.4f\" % (m+1, src, dest, #mg.words[m],", "if m == 0: bo_state = 0 # backoff state elif tuple(mg.words[1:]) in", "-ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from an N-gram backoff", "class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states, input and output symbols", "if wsym == 0: continue # Use a single symbol for end-of-sentence if", "to the suffix M-1-gram # Note taht if mg.log_bowt == 0 it's particularly", "# Table holding M-gram to state mappings sidtab = {} fst.AddState() # guaranteed", "in the language model, where N=N (the order of the model, that is).", "else: final = False newstate = True dest = fst.AddState() if mg.words[m] ==", "symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt = txt.split() for c in", "return dfst import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi", "sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states, input", "RuntimeError, \"Unable to find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym,", "# Create an arc from state q(1,M-1) to q(1,M) with weight P(w(1,M)) #", "-1: #print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix", "= symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev =", "\"\"\" for c in self.classes: t = sum(self.classes[c].itervalues()) if t != 0: for", "raise RuntimeError, \"Unable to find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src,", "wsym = symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue if ng.words[n-1] ==", "0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup", 
"model, create a VectorFst from it and project it to its input. \"\"\"", "1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname =", "math.log(10) ll -= a.weight.Value() st = a.nextstate return ll def lm_eval(lm, sent): sent", "the Dot language. \"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms ==", "definition to a file. \"\"\" if not isinstance(outfile, file): outfile = file(outfile) for", "definition file. This transducer maps words to classes, and can either be composed", "and the language model. To obtain the full language model, create a VectorFst", "dictionary lookup FST for a set of strings. \"\"\" fst = fstclass() isyms", "= fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym:", "Find longest suffix N-gram that exists spos = 1 while tuple(ng.words[spos:]) not in", "input, or pre-composed with the language model. 
In the latter case you can", "else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass = classname def add_class(self,", "OOV if m > 0 and mg.words[0] == '</s>': continue # skip >1-grams", "[w,] for c in w: csym = insym.AddSymbol(c) for w, wsym in outsym:", "parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass = classname", "%d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\"", "fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next # And an epsilon", "in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1: continue # skip mgrams", "bo_label = epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) #", "= openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c =", "lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this: #", "to find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob,", "Normalize probabilities. \"\"\" for c in self.classes: t = sum(self.classes[c].itervalues()) if t !=", "under the same terms as # Sphinx-III \"\"\" FST utility functions \"\"\" __author__", "c in w: csym = insym.AddSymbol(c) for w, wsym in outsym: if wsym", "particularly important to do this! if m == 0: bo_state = 0 #", "c continue else: sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym,", "order of the model, that is). \"\"\" for ng in lm.mgrams(n-1): wsym =", "it's particularly important to do this! if m == 0: bo_state = 0", "to behave somewhat like the Dot language. 
\"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None):", "For M in 1 to N-1: # For each M-gram w(1,M): # Create", "outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. \"\"\" for c", "By default this returns the lazy composition of the class definition transducer and", "fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert", "pre-composed with the language model. In the latter case you can project the", "mapping (</s>,) =>\", dest else: final = False newstate = True dest =", "%s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add a new", "probability definition. \"\"\" self.classes[name] = {} def add_class_word(self, name, word, prob): \"\"\" Add", "%d-gram arc %d => %d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if", "if m == 0: src = 0 # 1-grams start in backoff state", "None else: prob = 1.0 if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass,", "behave somewhat like the Dot language. \"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self)", "for w, wsym in outsym: if wsym == 0: continue # Use a", "= True dest = fst.AddState() if mg.words[m] == '<s>': # <s> is a", "% c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END", "FST. 
\"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab =", "if infile != None: self.read(infile) def read(self, infile): \"\"\" Read probability definition from", "self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest", "0 for i in xrange(len(sent)): if sent[i] == '<s>': continue prob = lm.prob(sent[i::-1])", "fst.AddState() # guaranteed to be zero (we hope) for m in range(lm.get_size() -", "outsym: if wsym == 0: continue # Use a single symbol for end-of-sentence", "= \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys import os import tempfile", "sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label,", "a list of words, or a string of whitespace-separated tokens, to a sentence", "openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final", "FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import", "it to its input. \"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols())", "start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w, wsym in", "\"Adding %d-gram arc %d => %d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob)", "== \"\": continue if inclass: parts = spam.split() if len(parts) == 2 \\", "w, wsym in outsym: if wsym == 0: continue # Use a single", "0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs,", "of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst()", "outsym.Find(w) # Add an epsilon:word arc to the first state of this word", "probabilities. 
\"\"\" for c in self.classes: t = sum(self.classes[c].itervalues()) if t != 0:", "m, sidtab, bo_label=0): \"\"\" Add states and arcs for all M-grams in the", "You may copy and modify this freely under the same terms as #", "on the symbol table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym =", "ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc", "Add a class to this probability definition. \"\"\" self.classes[name] = {} def add_class_word(self,", "self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" %", "and mg.words[0] == '</s>': continue # skip >1-grams starting with </s> if m", "self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest)", "c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst()", "-mg.log_prob, dest)) #print \"Added %d-gram arc %d => %d %s/%.4f\" % (m+1, src,", "prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split()", "= SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab = isyms else: symtab =", "fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary", "\"\"\" FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\"", "= False if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState()", "using fstdraw and dot. 
\"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile)", "ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like", "bo_label=0): \"\"\" Add states and arcs for all M-grams in the language model,", "to its input. \"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst)", "tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor", "resulting transducer to its input to obtain an equivalent non-class-based model. \"\"\" if", "an FST recognizer from an N-gram backoff language model. \"\"\" fst = openfst.StdVectorFst()", "for spam in infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if", "final state is allowed final = True newstate = False if ('</s>',) in", "acceptor = \"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw %s '%s' |", "\"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start", "= float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname)", "for all N-grams in the language model, where N=N (the order of the", "\"\"\" Write out probability definition to a file. \"\"\" if not isinstance(outfile, file):", "= outsym.Find(w) # Add an epsilon:word arc to the first state of this", "adds states, input and output symbols as required. This is meant to behave", "input. 
\"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return", "utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys", "%.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add", "openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else:", "openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label", "state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For each N-gram w(1,N): #", "False if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest,", "the language model, where N=N (the order of the model, that is). \"\"\"", "\"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev", "allowed final = True newstate = False if ('</s>',) in sidtab: dest =", "in a Sphinx probability definition file. This transducer maps words to classes, and", "else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one final state", "while src >= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym,", "create a VectorFst from it and project it to its input. \"\"\" lmfst", "recognizer from an N-gram backoff language model. 
\"\"\" fst = openfst.StdVectorFst() symtab =", "else: sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt)", "# The destination state will be the initial state fst.SetStart(dest) #print \"Initial state\",", "if newstate: # Add a new state to the mapping if needed sidtab[tuple(mg.words)]", "nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt,", "self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src,", "phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon for ug in", "Carnegie Mellon University # # You may copy and modify this freely under", "#mg.words[m], -mg.log_prob) if newstate: # Add a new state to the mapping if", "in xrange(len(sent)): if sent[i] == '<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob", "outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a", "weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms", "sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists spos = 1 while tuple(ng.words[spos:])", "FST as a PDF using fstdraw and dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile", "# And an epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0,", "foo, classname = spam.split() self.add_class(classname) inclass = classname def add_class(self, name): \"\"\" Add", "wsym in outsym: if wsym == 0: continue wsym = outsym.Find(w) # Add", "a word to a class in this probability definition. 
\"\"\" self.classes[name][word] = prob", "dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self, *args):", "start for c in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym,", "else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\", dest", "not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym)", "M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d => %d", "can either be composed with the input, or pre-composed with the language model.", "a non-event if m == 0: # The destination state will be the", "o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll = 0 while", "\"\"\" Add a class to this probability definition. \"\"\" self.classes[name] = {} def", "while tuple(ng.words[spos:]) not in sidtab: spos += 1 if spos == n: raise", "SetFinal(self, state, weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight)", "The algorithm goes like this: # # Create a backoff state # For", "sent[i::-1], prob / math.log(10), bo ll += prob return ll if __name__ ==", "in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word)", "= file(infile) inclass = None for spam in infile: spam = spam.strip() if", "def write(self, outfile): \"\"\" Write out probability definition to a file. \"\"\" if", "st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab: if label", "and project it to its input. 
\"\"\" lmfst = build_lmfst(lm, use_phi) classfst =", "an arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For each", "nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst", "= o.Start() ll = 0 while st != -1 and o.NumArcs(st): a =", "= self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates():", "important to do this! if m == 0: bo_state = 0 # backoff", "openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o,", "language model, create a VectorFst from it and project it to its input.", "for x in sent.split() if not x.startswith('++')] ll = 0 for i in", "os import tempfile import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST", "sym == -1: #print \"Warning, unknown word\", c continue else: sym = symtab.AddSymbol(c)", "wsym == -1: continue # skip mgrams ending in OOV if m >", "= symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label =", "OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one final", "\"Final state\", dest #print \"Entered state ID mapping (</s>,) =>\", dest else: final", "== '<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll", "(n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer", "if wsym == 0: continue wsym = outsym.Find(w) # Add an epsilon:word arc", "fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev", "src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >=", "in lm.mgrams(n-1): wsym = 
symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue if", "osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states", "do this! if m == 0: bo_state = 0 # backoff state elif", "dest)) #print \"Adding %d-gram arc %d => %d %s/%.4f\" % (n, src, dest,", "an FST from a class-based language model. By default this returns the lazy", "else: bo_label = epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab)", "ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src,", "epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym)", "fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as a PDF using fstdraw and", "# Find longest suffix N-gram that exists spos = 1 while tuple(ng.words[spos:]) not", "final = False newstate = True dest = fst.AddState() if mg.words[m] == '<s>':", "src >= self.NumStates(): self.AddState() while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym,", "zero (we hope) for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m,", "c in s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0,", "name): \"\"\" Add a class to this probability definition. 
\"\"\" self.classes[name] = {}", "ID mapping\", tuple(mg.words), \"=>\", dest if not final: # Create a backoff arc", "fst.SetFinal(final, 0) for w, wsym in outsym: if wsym == 0: continue #", "To obtain the full language model, create a VectorFst from it and project", "and parts[0] == \"END\" and parts[1] == classname: inclass = None else: prob", "self.classes: t = sum(self.classes[c].itervalues()) if t != 0: for w in self.classes[c]: self.classes[c][w]", "# Create a state q(1,M) # Create an arc from state q(1,M-1) to", "for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst,", "SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\")", "wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\"", "q(1,M) with weight P(w(1,M)) # Create an arc from state q(1,M) to q(2,M)", "0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw", "tuple(mg.words), \"=>\", dest if not final: # Create a backoff arc to the", "self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states and arcs", "str): txt = txt.split() for c in txt: if omitstart and c ==", "wsym, 0, prev)) # Use a single symbol for end-of-sentence if w ==", "= phi else: bo_label = epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0])", "composition of the class definition transducer and the language model. 
To obtain the", "if __name__ == '__main__': lmf, fstf = sys.argv[1:] lm = sphinxbase.NGramModel(lmf) fst =", "bo_label = phi else: bo_label = epsilon for ug in lm.mgrams(0): wsym =", "openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst)", "continue if inclass: parts = spam.split() if len(parts) == 2 \\ and parts[0]", "%d => %d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False):", "2010 Carnegie Mellon University # # You may copy and modify this freely", "for c in w: csym = insym.AddSymbol(c) for w, wsym in outsym: if", "= fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState()", "its input. \"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst)", "def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an FST. \"\"\" fst", "build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on the symbol table of lmfst.", "for s in strs: prev = start for c in s: nxt =", "parts = spam.split() if len(parts) == 2 \\ and parts[0] == \"END\" and", "to the mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\",", "the symbol table of lmfst. 
\"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols()", "math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if", "% (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities.", "skip OOVs if symtab.Find(w) == -1: #print w, \"not found\" continue src =", "self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest): if not isinstance(isym, int):", "\"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms:", "w = [w,] for c in w: csym = insym.AddSymbol(c) for w, wsym", "#print \"Entered state ID mapping (</s>,) =>\", dest else: final = False newstate", "default this returns the lazy composition of the class definition transducer and the", "== '</s>': continue # skip >1-grams starting with </s> if m == 0:", "-= a.weight.Value() st = a.nextstate return ll def lm_eval(lm, sent): sent = [x", "to be zero (we hope) for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab,", "openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w, wsym", "# Add an epsilon:word arc to the first state of this word prev", "isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for", "txt: if omitstart and c == '<s>': continue nxt = fst.AddState() if isyms:", "fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for", "self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, 
self.isyms) def", "sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d => %d", "src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one final state is", "isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while", "isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst,", "language model. \"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if", "all M-grams in the language model, where M<N. \"\"\" for mg in lm.mgrams(m):", "model, where M<N. \"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym", "o, 1) st = o.Start() ll = 0 while st != -1 and", "And an epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final))", "modify this freely under the same terms as # Sphinx-III \"\"\" FST utility", "lm, m, sidtab, bo_label=0): \"\"\" Add states and arcs for all M-grams in", "!= 0: for w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\"", "acceptor=False): \"\"\" Draw an FST as a PDF using fstdraw and dot. 
\"\"\"", "ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1: #print w, \"not found\" continue", "isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if", "For each M-gram w(1,M): # Create a state q(1,M) # Create an arc", "for c in w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0,", "sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if not", "= start for c in s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev,", "AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states, input and output symbols as", "sidtab, bo_label=0): \"\"\" Add states and arcs for all M-grams in the language", "openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in strs: prev =", "for word, label in symtab: if label == openfst.epsilon: continue fst.AddArc(st, label, label,", "sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\",", "openfst.ShortestPath(c, o, 1) st = o.Start() ll = 0 while st != -1", "\"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start =", "final: # Create a backoff arc to the suffix M-1-gram # Note taht", "state q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create an arc from state", "lazy composition of the class definition transducer and the language model. 
To obtain", "self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms)", "add_class_word(self, name, word, prob): \"\"\" Add a word to a class in this", "in txt: if omitstart and c == '<s>': continue nxt = fst.AddState() if", "state # For M in 1 to N-1: # For each M-gram w(1,M):", "\\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value() st = a.nextstate return ll", "in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no suffix", "# Create an arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) #", "the class definition transducer and the language model. To obtain the full language", "= dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if not final:", "openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds", "Sphinx-III \"\"\" FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision", "lm, lm.get_size(), sidtab) # Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return", "continue nxt = fst.AddState() if isyms: sym = isyms.Find(c) if sym == -1:", "def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of words, or a", "isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab = isyms", "self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest):", "if mg.log_bowt == 0 it's particularly important to do this! if m ==", "% c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. 
\"\"\" for c in self.classes:", "for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes", "it and project it to its input. \"\"\" lmfst = build_lmfst(lm, use_phi) classfst", "m == 0: bo_state = 0 # backoff state elif tuple(mg.words[1:]) in sidtab:", "osym, weight, dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym,", "whitespace-separated tokens, to a sentence FST. \"\"\" fst = fstclass() start = fst.AddState()", "sidtab): \"\"\" Add states and arcs for all N-grams in the language model,", "o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value() st", "\"Warning, unknown word\", c continue else: sym = symtab.AddSymbol(c) #print prev, sym, nxt", "Draw an FST as a PDF using fstdraw and dot. \"\"\" tempdir =", "<s> is a non-event if m == 0: # The destination state will", "to an FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab =", "prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize probabilities. \"\"\" for", "M-gram to state mappings sidtab = {} fst.AddState() # guaranteed to be zero", "openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in", "a character-to-word FST based on the symbol table of lmfst. \"\"\" insym =", "character-to-word FST based on the symbol table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\")", "that is). 
\"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym ==", "arc from state q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create an arc", "a backoff arc to the suffix M-1-gram # Note taht if mg.log_bowt ==", "= fst.AddState() fst.SetStart(start) for s in strs: prev = start for c in", "in self.classes: t = sum(self.classes[c].itervalues()) if t != 0: for w in self.classes[c]:", "weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self,", "an N-gram backoff language model. \"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon", "to a sentence FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) if", "'%s' | dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return", "\"\"\" Normalize probabilities. \"\"\" for c in self.classes: t = sum(self.classes[c].itervalues()) if t", "openfst.StdArc(0, wsym, 0, prev)) # Use a single symbol for end-of-sentence if w", "= nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build", "can project the resulting transducer to its input to obtain an equivalent non-class-based", "0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return", "symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt: nxt = fst.AddState() sym =", "python # Copyright (c) 2010 Carnegie Mellon University # # You may copy", "N=N (the order of the model, that is). 
\"\"\" for ng in lm.mgrams(n-1):", "opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st", "fst.AddState() fst.SetStart(start) for s in strs: prev = start for c in s:", "opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c", "lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1)", "= start if isinstance(txt, str): txt = txt.split() for c in txt: if", "else: acceptor = \"\" rv = os.system(\"fstdraw %s '%s' | dot -Tpdf >", "this! if m == 0: bo_state = 0 # backoff state elif tuple(mg.words[1:])", "prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\"", "%d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add a", "dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols())", "# Add a new state to the mapping if needed sidtab[tuple(mg.words)] = dest", "fst.SetFinal(st, 0) for word, label in symtab: if label == openfst.epsilon: continue fst.AddArc(st,", "if wsym == -1: continue # skip mgrams ending in OOV if m", "'</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip OOVs if symtab.Find(w)", "openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms", "and parts[1] == classname: inclass = None else: prob = 1.0 if len(parts)", "t != 0: for w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None):", "float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass", 
"None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self,", "state is allowed final = True newstate = False if ('</s>',) in sidtab:", "=> %d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add", "Table holding M-gram to state mappings sidtab = {} fst.AddState() # guaranteed to", "= classname def add_class(self, name): \"\"\" Add a class to this probability definition.", "outfile, acceptor=False): \"\"\" Draw an FST as a PDF using fstdraw and dot.", "if omitstart and c == '<s>': continue nxt = fst.AddState() if isyms: sym", "prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab)", "% (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add a new state", "state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms):", "== n: raise RuntimeError, \"Unable to find suffix N-gram for\", ng.wids dest =", "an arc from state q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table holding", "Create a state q(1,M) # Create an arc from state q(1,M-1) to q(1,M)", "weight P(w(1,N)) # Table holding M-gram to state mappings sidtab = {} fst.AddState()", "bowt(w(1,M-1)) # For each N-gram w(1,N): # Create an arc from state q(1,N-1)", "\"\"\" FST class which automatically adds states, input and output symbols as required.", "t def build_classfst(probdef, isyms=None): \"\"\" Build an FST from the classes in a", "o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ #", "openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state)", "add_mgram_states(fst, 
symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect", "x.startswith('++')] ll = 0 for i in xrange(len(sent)): if sent[i] == '<s>': continue", "> '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst,", "non-event continue if '</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip", "symtab: if label == openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for c", "-math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an", "self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self,", "bo ll += prob return ll if __name__ == '__main__': lmf, fstf =", "maps words to classes, and can either be composed with the input, or", "language model. 
In the latter case you can project the resulting transducer to", "continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists spos =", "epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label", "= openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts)", "def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on the symbol table of", "bo_state = 0 # backoff state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])]", "destination state will be the initial state fst.SetStart(dest) #print \"Initial state\", dest else:", "if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass = classname def add_class(self, name):", "and can either be composed with the input, or pre-composed with the language", "parts[1] == classname: inclass = None else: prob = 1.0 if len(parts) >", "M-grams in the language model, where M<N. \"\"\" for mg in lm.mgrams(m): wsym", "not isinstance(infile, file): infile = file(infile) inclass = None for spam in infile:", "ll = 0 while st != -1 and o.NumArcs(st): a = o.GetArc(st, 0)", "else: prob = 1.0 if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0],", "Not a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding", "# For each M-gram w(1,M): # Create a state q(1,M) # Create an", "spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam == \"\": continue", "if sent[i] == '<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10),", "w == '</s>': w = [w,] for c in w: csym = insym.AddSymbol(c)", "probability definition from a file. 
\"\"\" if not isinstance(infile, file): infile = file(infile)", "not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest", "int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not", "continue if '</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip OOVs", "and modify this freely under the same terms as # Sphinx-III \"\"\" FST", "openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt: nxt = fst.AddState() sym", "a file. \"\"\" if not isinstance(infile, file): infile = file(infile) inclass = None", "inclass = None for spam in infile: spam = spam.strip() if spam.startswith('#') or", "may copy and modify this freely under the same terms as # Sphinx-III", "%s '%s' | dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir)", "in sent.split() if not x.startswith('++')] ll = 0 for i in xrange(len(sent)): if", "(we hope) for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab,", "FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\")", "= openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in strs: prev", "symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon", "if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start", "openfst.StdVectorFst() if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st =", "file. \"\"\" if not isinstance(infile, file): infile = file(infile) inclass = None for", "model. 
In the latter case you can project the resulting transducer to its", "len(parts) == 2 \\ and parts[0] == \"END\" and parts[1] == classname: inclass", ">1-grams starting with </s> if m == 0: src = 0 # 1-grams", "ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms ==", "= openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym,", "continue if spam == \"\": continue if inclass: parts = spam.split() if len(parts)", "arc to the suffix M-1-gram # Note taht if mg.log_bowt == 0 it's", "OOV continue if ng.words[n-1] == '<s>': # non-event continue if '</s>' in ng.words[0:n-1]:", "lm.get_size(), sidtab) # Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst", "= 1.0 if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else:", "to q(1,M) with weight P(w(1,M)) # Create an arc from state q(1,M) to", "based on the symbol table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym", "in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def", "weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self,", "lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi !=", "#print \"Final state\", dest #print \"Entered state ID mapping (</s>,) =>\", dest else:", "Create a backoff state # For M in 1 to N-1: # For", "obtain an equivalent non-class-based model. 
\"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef)", "the final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def", "for c in s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0,", "True newstate = False if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest", "= os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor = \"\"", "for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym == -1: # OOV", "spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass = classname def add_class(self, name): \"\"\"", "fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this: # # Create a backoff", "isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt", "q(2,M) with weight bowt(w(1,M-1)) # For each N-gram w(1,N): # Create an arc", "start for c in s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym,", "the full language model, create a VectorFst from it and project it to", "% (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states", "\"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab = isyms", "= file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob", "# <s> is a non-event if m == 0: # The destination state", "build_classfst(probdef, isyms=None): \"\"\" Build an FST from the classes in a Sphinx probability", "if not isinstance(infile, file): infile = file(infile) inclass = None for spam in", "openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on the symbol", "where M<N. 
\"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym ==", "mg.words[m] == '</s>': # only one final state is allowed final = True", "= 0 for i in xrange(len(sent)): if sent[i] == '<s>': continue prob =", "-mg.log_bowt, bo_state)) #print \"Adding backoff arc %d => %d %.4f\" % (dest, bo_state,", "state mappings sidtab = {} fst.AddState() # guaranteed to be zero (we hope)", "fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab: if label == openfst.epsilon:", "classes, and can either be composed with the input, or pre-composed with the", "= {} if infile != None: self.read(infile) def read(self, infile): \"\"\" Read probability", "/ math.log(10), bo ll += prob return ll if __name__ == '__main__': lmf,", "= openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll = 0 while st", "0: # The destination state will be the initial state fst.SetStart(dest) #print \"Initial", "project the resulting transducer to its input to obtain an equivalent non-class-based model.", "this means it has an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] ==", "N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding", "fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start)", "for a set of strings. \"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms", "fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d => %d %s/%.4f\"", "like the Dot language. 
\"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms", "= 0 # 1-grams start in backoff state elif tuple(mg.words[0:m]) not in sidtab:", "fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi =", "\\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value() st =", "sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions()", "False newstate = True dest = fst.AddState() if mg.words[m] == '<s>': # <s>", "= openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c,", "= osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add", "starting with </s> if m == 0: src = 0 # 1-grams start", "#print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram", "Add a word to a class in this probability definition. \"\"\" self.classes[name][word] =", "def add_class(self, name): \"\"\" Add a class to this probability definition. 
\"\"\" self.classes[name]", "newstate = True dest = fst.AddState() if mg.words[m] == '<s>': # <s> is", "sidtab[('</s>',)] = dest #print \"Final state\", dest #print \"Entered state ID mapping (</s>,)", "opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst,", "prev = nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0,", "o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value() st = a.nextstate return", "continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll += prob", "if needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest", "not x.startswith('++')] ll = 0 for i in xrange(len(sent)): if sent[i] == '<s>':", "'</s>': # only one final state is allowed final = True newstate =", "self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm,", "0 it's particularly important to do this! if m == 0: bo_state =", "fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to", "of whitespace-separated tokens, to a sentence FST. 
\"\"\" fst = fstclass() start =", "c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in self.classes[c]: outfile.write(\"%s", "nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0)", "#print \"Warning, unknown word\", c continue else: sym = symtab.AddSymbol(c) #print prev, sym,", "file): outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for", "# skip OOVs if symtab.Find(w) == -1: #print w, \"not found\" continue src", "= 0 # backoff state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else:", "0) sidtab[('</s>',)] = dest #print \"Final state\", dest #print \"Entered state ID mapping", "sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort the resulting", "# Not a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print", "not isinstance(outfile, file): outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" %", "holding M-gram to state mappings sidtab = {} fst.AddState() # guaranteed to be", "/ math.log(10) ll -= a.weight.Value() st = a.nextstate return ll def lm_eval(lm, sent):", "Dot language. \"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None:", "openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d => %d %s/%.4f\" %", "prev = next # And an epsilon arc to the final state fst.AddArc(prev,", "composed with the input, or pre-composed with the language model. In the latter", "the language model. To obtain the full language model, create a VectorFst from", "\"\"\" Build a character-to-word FST based on the symbol table of lmfst. 
\"\"\"", "in w: csym = insym.AddSymbol(c) for w, wsym in outsym: if wsym ==", "to N-1: # For each M-gram w(1,M): # Create a state q(1,M) #", "-1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi)", "= build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a", "Copyright (c) 2010 Carnegie Mellon University # # You may copy and modify", "= symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel,", "class-based language model. By default this returns the lazy composition of the class", "a 1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff", "if inclass: parts = spam.split() if len(parts) == 2 \\ and parts[0] ==", "has an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only", "if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'):", "arc %d => %d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm,", "for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1: continue #", "symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt = txt.split()", "osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in strs: prev = start for", "string to an FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab", "language model, where M<N. 
\"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if", "inclass: parts = spam.split() if len(parts) == 2 \\ and parts[0] == \"END\"", "fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False):", "if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms", "same terms as # Sphinx-III \"\"\" FST utility functions \"\"\" __author__ = \"<NAME>", "word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a single", "return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of words,", "in outsym: if wsym == 0: continue wsym = outsym.Find(w) # Add an", "for c in txt: if omitstart and c == '<s>': continue nxt =", "suffix M-1-gram # Note taht if mg.log_bowt == 0 it's particularly important to", "> 0 and mg.words[0] == '</s>': continue # skip >1-grams starting with </s>", "state ID mapping (</s>,) =>\", dest else: final = False newstate = True", "# For each N-gram w(1,N): # Create an arc from state q(1,N-1) to", "from state q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create an arc from", "the latter case you can project the resulting transducer to its input to", "\"\"\" Convert a list of words, or a string of whitespace-separated tokens, to", "if not x.startswith('++')] ll = 0 for i in xrange(len(sent)): if sent[i] ==", "openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states and", "guaranteed to be zero (we hope) for m in range(lm.get_size() - 1): add_mgram_states(fst,", "probdef, use_phi=False): \"\"\" Build an FST from a class-based language model. By default", "fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as a PDF using", "out probability definition to a file. 
\"\"\" if not isinstance(outfile, file): outfile =", "Add states and arcs for all M-grams in the language model, where M<N.", "/= t def build_classfst(probdef, isyms=None): \"\"\" Build an FST from the classes in", "fst.SetOutputSymbols(symtab) # The algorithm goes like this: # # Create a backoff state", "an FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\")", "\"=>\", dest if not final: # Create a backoff arc to the suffix", "words, or a string of whitespace-separated tokens, to a sentence FST. \"\"\" fst", "# Sphinx-III \"\"\" FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ =", "= symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon for ug in lm.mgrams(0):", "%d => %d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n,", "= openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s", "0 and mg.words[0] == '</s>': continue # skip >1-grams starting with </s> if", "\"Entered state ID mapping (</s>,) =>\", dest else: final = False newstate =", "\"\"\" if not isinstance(infile, file): infile = file(infile) inclass = None for spam", "newstate: # Add a new state to the mapping if needed sidtab[tuple(mg.words)] =", "src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add a new state to the", "0: for w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build", "fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym: if wsym == 0: continue", "dfst import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi =", "0, st) for c in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in", "= lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll += prob return ll", "a.weight.Value() st = a.nextstate return ll 
def lm_eval(lm, sent): sent = [x for", "if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab", "\"\"\" Draw an FST as a PDF using fstdraw and dot. \"\"\" tempdir", "sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev", "w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev", "prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0)", "ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from an N-gram", "will be the initial state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym,", "\"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc", "algorithm goes like this: # # Create a backoff state # For M", "Read probability definition from a file. \"\"\" if not isinstance(infile, file): infile =", "%d => %d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: #", "#print \"Adding %d-gram arc %d => %d %s/%.4f\" % (n, src, dest, ng.words[n-1],", "a Sphinx probability definition file. This transducer maps words to classes, and can", "probability definition. \"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\" Write out probability", "an FST from the classes in a Sphinx probability definition file. This transducer", "bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no suffix M-gram fst.AddArc(dest,", "and dot. 
\"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor:", "with weight P(w(1,M)) # Create an arc from state q(1,M) to q(2,M) with", "if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym =", "-1: continue # skip mgrams ending in OOV if m > 0 and", "%d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build", "dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state,", "add_class(self, name): \"\"\" Add a class to this probability definition. \"\"\" self.classes[name] =", "dot -Tpdf > '%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def", "%d-gram arc %d => %d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def", "list of words, or a string of whitespace-separated tokens, to a sentence FST.", "dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\", dest if not final: #", "s in strs: prev = start for c in s: nxt = fst.AddState()", "bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d => %d %.4f\" % (dest,", "final state fst.AddArc(prev, openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst,", "outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState()", "class SphinxProbdef(object): \"\"\" Probability definition file used for Sphinx class language models. 
\"\"\"", "in s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt)", "= sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists spos = 1 while", "spam == \"\": continue if inclass: parts = spam.split() if len(parts) == 2", "spam.split() if len(parts) == 2 \\ and parts[0] == \"END\" and parts[1] ==", "in sidtab: continue # this means it has an OOV else: src =", "'</s>': w = [w,] for c in w: csym = insym.Find(c) next =", "backoff state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not", "def read(self, infile): \"\"\" Read probability definition from a file. \"\"\" if not", "add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states and arcs for all", "build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from an N-gram backoff language model.", "for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st)", "st = o.Start() ll = 0 while st != -1 and o.NumArcs(st): a", "P(w(1,N)) # Table holding M-gram to state mappings sidtab = {} fst.AddState() #", "== 0: src = 0 # 1-grams start in backoff state elif tuple(mg.words[0:m])", "weight P(w(1,M)) # Create an arc from state q(1,M) to q(2,M) with weight", "fst.SetStart(start) if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev =", "fstdraw and dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if", "(m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate: # Add a new state to", "file(infile) inclass = None for spam in infile: spam = spam.strip() if spam.startswith('#')", "symbols as required. 
This is meant to behave somewhat like the Dot language.", "\"Adding backoff arc %d => %d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst,", "spam.startswith('#') or spam.startswith(';'): continue if spam == \"\": continue if inclass: parts =", "in ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip OOVs if symtab.Find(w) ==", "0: continue # Use a single symbol for end-of-sentence if w == '</s>':", "spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam == \"\": continue if inclass:", "osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms", "text string to an FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start)", "set of strings. \"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\")", "start = fst.AddState() fst.SetStart(start) for s in strs: prev = start for c", "functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys import", "dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\", dest #print", "the same terms as # Sphinx-III \"\"\" FST utility functions \"\"\" __author__ =", "== '</s>': w = [w,] for c in w: csym = insym.Find(c) next", "Mellon University # # You may copy and modify this freely under the", "word to a class in this probability definition. 
\"\"\" self.classes[name][word] = prob def", "return ll if __name__ == '__main__': lmf, fstf = sys.argv[1:] lm = sphinxbase.NGramModel(lmf)", "if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src =", "fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for a set", "state ID mapping\", tuple(mg.words), \"=>\", dest if not final: # Create a backoff", "# Note taht if mg.log_bowt == 0 it's particularly important to do this!", "initial state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest))", "{} fst.AddState() # guaranteed to be zero (we hope) for m in range(lm.get_size()", "models. \"\"\" def __init__(self, infile=None): self.classes = {} if infile != None: self.read(infile)", "wsym = outsym.Find(w) # Add an epsilon:word arc to the first state of", "in ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1: #print w, \"not found\"", "fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as", "= fst.AddState() if mg.words[m] == '<s>': # <s> is a non-event if m", "\"\"\" self.classes[name] = {} def add_class_word(self, name, word, prob): \"\"\" Add a word", "1-gram, no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc", "== classname: inclass = None else: prob = 1.0 if len(parts) > 1:", "__author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys import os import", "self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not isinstance(state, int):", "is allowed final = True newstate = False if ('</s>',) in sidtab: dest", "1 if spos == n: raise RuntimeError, \"Unable to find suffix N-gram for\",", "i in xrange(len(sent)): if sent[i] == '<s>': continue prob = 
lm.prob(sent[i::-1]) #print sent[i::-1],", "start in backoff state elif tuple(mg.words[0:m]) not in sidtab: continue # this means", "probability definition file. This transducer maps words to classes, and can either be", "states and arcs for all N-grams in the language model, where N=N (the", "state of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) #", "be zero (we hope) for m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm,", "else: c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st =", "def SetFinal(self, state, weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state,", "range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(),", "copy and modify this freely under the same terms as # Sphinx-III \"\"\"", "dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor", "== 0: continue wsym = outsym.Find(w) # Add an epsilon:word arc to the", "mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1: continue # skip", "c in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel =", "nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev =", "first state of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev))", "ID mapping (</s>,) =>\", dest else: final = False newstate = True dest", "0, next)) prev = next # And an epsilon arc to the final", "epsilon for ug in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm", "N-gram w(1,N): # Create an arc from state q(1,N-1) to q(2,N) with weight", "if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if 
osyms == None: osyms", "# For M in 1 to N-1: # For each M-gram w(1,M): #", "a new state to the mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered", "fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of words, or a string of", "= isyms.Find(c) if sym == -1: #print \"Warning, unknown word\", c continue else:", "isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for", "arc %d => %d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob) if newstate:", "or pre-composed with the language model. In the latter case you can project", "the input, or pre-composed with the language model. In the latter case you", "import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically", "\"\"\" Add a word to a class in this probability definition. \"\"\" self.classes[name][word]", "word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c)", "Build an FST from the classes in a Sphinx probability definition file. This", "\"END\" and parts[1] == classname: inclass = None else: prob = 1.0 if", "# You may copy and modify this freely under the same terms as", "backoff language model. 
\"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\")", "== '<s>': # non-event continue if '</s>' in ng.words[0:n-1]: continue for w in", "0) for word, label in symtab: if label == openfst.epsilon: continue fst.AddArc(st, label,", "self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self):", "new state to the mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered state", "isyms: sym = isyms.Find(c) if sym == -1: #print \"Warning, unknown word\", c", "tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\"", "phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst,", "m > 0 and mg.words[0] == '</s>': continue # skip >1-grams starting with", "-ng.log_prob, dest)) #print \"Adding %d-gram arc %d => %d %s/%.4f\" % (n, src,", "which automatically adds states, input and output symbols as required. This is meant", "# Create a backoff arc to the suffix M-1-gram # Note taht if", "fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return", "for c in self.classes: t = sum(self.classes[c].itervalues()) if t != 0: for w", "outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\"", "if '</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip OOVs if", "+= 1 if spos == n: raise RuntimeError, \"Unable to find suffix N-gram", "Build a dictionary lookup FST for a set of strings. 
\"\"\" fst =", "return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string to an FST.", "symtab.Find(w) == -1: #print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find", "'%s'\" % (acceptor, fstfile, outfile)) os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None,", "=> %d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab):", "to q(2,M) with weight bowt(w(1,M-1)) # For each N-gram w(1,N): # Create an", "0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll", "= {} def add_class_word(self, name, word, prob): \"\"\" Add a word to a", "isym, osym, weight, dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if not", "continue # this means it has an OOV else: src = sidtab[tuple(mg.words[0:m])] if", "= fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next # And an", "# 1-grams start in backoff state elif tuple(mg.words[0:m]) not in sidtab: continue #", "# only one final state is allowed final = True newstate = False", "fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a single symbol for end-of-sentence", "int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest >= self.NumStates():", "symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label", "== 0: continue # Use a single symbol for end-of-sentence if w ==", "\"\"\" Read probability definition from a file. 
\"\"\" if not isinstance(infile, file): infile", "fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for", "the initial state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob,", "symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef,", "= openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in", "like this: # # Create a backoff state # For M in 1", "isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms =", "__init__(self, infile=None): self.classes = {} if infile != None: self.read(infile) def read(self, infile):", "\"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m]) if wsym == -1: continue", "def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab, lm, m,", "while st != -1 and o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel),", "(dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states and", "-mg.log_prob) if newstate: # Add a new state to the mapping if needed", "label, 0, st) for c in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob", "= sent2fst(sent, openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts =", "the first state of this word prev = fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0,", "0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt,", "that exists spos = 1 while tuple(ng.words[spos:]) not in sidtab: spos += 1", 
"self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState()", "= fst.AddState() if isyms: sym = isyms.Find(c) if sym == -1: #print \"Warning,", "a class to this probability definition. \"\"\" self.classes[name] = {} def add_class_word(self, name,", "= nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt)", "return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as a PDF", "strs: prev = start for c in s: nxt = fst.AddState() isym =", "\"\"\" Add states and arcs for all M-grams in the language model, where", "2 \\ and parts[0] == \"END\" and parts[1] == classname: inclass = None", "to do this! if m == 0: bo_state = 0 # backoff state", "xrange(len(sent)): if sent[i] == '<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob /", "ll = 0 for i in xrange(len(sent)): if sent[i] == '<s>': continue prob", "fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from a class-based language", "class which automatically adds states, input and output symbols as required. This is", "Add states and arcs for all N-grams in the language model, where N=N", "build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word", "elif tuple(mg.words[0:m]) not in sidtab: continue # this means it has an OOV", "FST from a class-based language model. 
By default this returns the lazy composition", "words to classes, and can either be composed with the input, or pre-composed", "\"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw %s '%s' | dot -Tpdf", "in infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam ==", "= sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one final state is allowed", "each N-gram w(1,N): # Create an arc from state q(1,N-1) to q(2,N) with", "outsym: if wsym == 0: continue wsym = outsym.Find(w) # Add an epsilon:word", "a dictionary lookup FST for a set of strings. \"\"\" fst = fstclass()", "N-gram that exists spos = 1 while tuple(ng.words[spos:]) not in sidtab: spos +=", "for i in xrange(len(sent)): if sent[i] == '<s>': continue prob = lm.prob(sent[i::-1]) #print", "all N-grams in the language model, where N=N (the order of the model,", "#!/usr/bin/env python # Copyright (c) 2010 Carnegie Mellon University # # You may", "openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab:", "# OOV continue if ng.words[n-1] == '<s>': # non-event continue if '</s>' in", "isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args)", "s: nxt = fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev", "%s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an", "not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms", "the language model. 
In the latter case you can project the resulting transducer", "q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create an arc from state q(1,M)", "clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel,", "if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms)", "{} def add_class_word(self, name, word, prob): \"\"\" Add a word to a class", "=> %d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\"", "continue fst.AddArc(st, label, label, 0, st) for c in probdef.classes: clabel = symtab.AddSymbol(c)", "= symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue if ng.words[n-1] == '<s>':", "wsym = symtab.Find(mg.words[m]) if wsym == -1: continue # skip mgrams ending in", "= symtab.Find(mg.words[m]) if wsym == -1: continue # skip mgrams ending in OOV", "sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one final state is allowed final", "strings. 
\"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\")", "fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added", "lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0)", "else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt =", "n: raise RuntimeError, \"Unable to find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])]", "= self.osyms.AddSymbol(osym) if not isinstance(src, int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int):", "0, prev)) # Use a single symbol for end-of-sentence if w == '</s>':", "classname: inclass = None else: prob = 1.0 if len(parts) > 1: prob", "nxt fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab)", "None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\")", "OOVs if symtab.Find(w) == -1: #print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])]", "fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST as a", "symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt: nxt =", "input and output symbols as required. 
This is meant to behave somewhat like", "sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no suffix M-gram", "openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not isinstance(state,", "symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab: if", "= nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert", "= openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt: nxt = fst.AddState()", "for all M-grams in the language model, where M<N. \"\"\" for mg in", "inclass = None else: prob = 1.0 if len(parts) > 1: prob =", "input to obtain an equivalent non-class-based model. \"\"\" if not isinstance(probdef, SphinxProbdef): probdef", "return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from a class-based", "lookup FST for a set of strings. \"\"\" fst = fstclass() isyms =", "return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for a", "# # You may copy and modify this freely under the same terms", "no suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d", "N-gram backoff language model. 
\"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon =", "and arcs for all N-grams in the language model, where N=N (the order", "clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build", "return fst class SphinxProbdef(object): \"\"\" Probability definition file used for Sphinx class language", "sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def", "isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while dest >=", "and o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\", "if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 =", "with the language model. In the latter case you can project the resulting", "sidtab: continue # this means it has an OOV else: src = sidtab[tuple(mg.words[0:m])]", "def build_classfst(probdef, isyms=None): \"\"\" Build an FST from the classes in a Sphinx", "w, wsym in outsym: if wsym == 0: continue wsym = outsym.Find(w) #", "fst.AddState() if mg.words[m] == '<s>': # <s> is a non-event if m ==", "PDF using fstdraw and dot. \"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\")", "fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d => %d %.4f\"", "longest suffix N-gram that exists spos = 1 while tuple(ng.words[spos:]) not in sidtab:", "w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build an FST", "be the initial state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym,", "class to this probability definition. 
\"\"\" self.classes[name] = {} def add_class_word(self, name, word,", "probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st,", "= fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start", "self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest): if not isinstance(isym,", "backoff state elif tuple(mg.words[0:m]) not in sidtab: continue # this means it has", "automatically adds states, input and output symbols as required. This is meant to", "fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start =", "dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d =>", "file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in", "for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" %", "insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final =", "continue for w in ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1: #print", "word, prob): \"\"\" Add a word to a class in this probability definition.", "fst = openfst.StdVectorFst() if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\")", "isyms.AddSymbol(\"&epsilon;\") osyms.AddSymbol(\"&epsilon;\") start = fst.AddState() fst.SetStart(start) for s in strs: prev = start", "import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states, input and", "returns the lazy composition of the class definition transducer and the language model.", "fstfile = os.path.join(tempdir, 
\"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor =", "0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text string", "a VectorFst from it and project it to its input. \"\"\" lmfst =", "{} if infile != None: self.read(infile) def read(self, infile): \"\"\" Read probability definition", "a string of whitespace-separated tokens, to a sentence FST. \"\"\" fst = fstclass()", "non-class-based model. \"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst()", "# -a.weight.Value() / math.log(10) ll -= a.weight.Value() st = a.nextstate return ll def", "openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not isinstance(state, int): state", "c = openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start()", "final = True newstate = False if ('</s>',) in sidtab: dest = sidtab[('</s>',)]", "fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent):", "M in 1 to N-1: # For each M-gram w(1,M): # Create a", "tuple(mg.words[0:m]) not in sidtab: continue # this means it has an OOV else:", "a state q(1,M) # Create an arc from state q(1,M-1) to q(1,M) with", "wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d => %d %s/%.4f\" % (n,", "each M-gram w(1,M): # Create a state q(1,M) # Create an arc from", "symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states and arcs for all M-grams", "Sphinx class language models. 
\"\"\" def __init__(self, infile=None): self.classes = {} if infile", "#print sent[i::-1], prob / math.log(10), bo ll += prob return ll if __name__", "sidtab) # Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class", "= False newstate = True dest = fst.AddState() if mg.words[m] == '<s>': #", "src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from", "def __init__(self, infile=None): self.classes = {} if infile != None: self.read(infile) def read(self,", "> 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo, classname", "fst.AddArc(st, label, label, 0, st) for c in probdef.classes: clabel = symtab.AddSymbol(c) for", "= fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym: if wsym == 0:", "for end-of-sentence if w == '</s>': w = [w,] for c in w:", "= \"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw %s '%s' | dot", "elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram,", "bo_state)) #print \"Adding backoff arc %d => %d %.4f\" % (dest, bo_state, -mg.log_bowt)", "o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10)", "1-grams start in backoff state elif tuple(mg.words[0:m]) not in sidtab: continue # this", "= spam.split() self.add_class(classname) inclass = classname def add_class(self, name): \"\"\" Add a class", "FST for a set of strings. \"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\")", "1.0 if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if", "mg.log_bowt == 0 it's particularly important to do this! 
if m == 0:", "final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\" Draw an FST", "c == '<s>': continue nxt = fst.AddState() if isyms: sym = isyms.Find(c) if", "skip >1-grams starting with </s> if m == 0: src = 0 #", "fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw", "def add_mgram_states(fst, symtab, lm, m, sidtab, bo_label=0): \"\"\" Add states and arcs for", "FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition file used for", "sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of words, or a string", "non-event if m == 0: # The destination state will be the initial", "read(self, infile): \"\"\" Read probability definition from a file. \"\"\" if not isinstance(infile,", "language models. \"\"\" def __init__(self, infile=None): self.classes = {} if infile != None:", "#print prev, sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt,", "spos += 1 if spos == n: raise RuntimeError, \"Unable to find suffix", "latter case you can project the resulting transducer to its input to obtain", "definition. 
\"\"\" self.classes[name] = {} def add_class_word(self, name, word, prob): \"\"\" Add a", "\"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\" Write out probability definition to", "lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym == -1: # OOV continue if ng.words[n-1]", "arc from state q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For each N-gram", "len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob) else: if spam.startswith('LMCLASS'): foo,", "ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight,", "w in ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1: #print w, \"not", "single symbol for end-of-sentence if w == '</s>': w = [w,] for c", "\"\"\" lmfst = build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst,", "= [w,] for c in w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev,", "a sentence FST. 
\"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) if isyms:", "import tempfile import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class", "suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print", "x in sent.split() if not x.startswith('++')] ll = 0 for i in xrange(len(sent)):", "wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d => %d %s/%.4f\" % (m+1,", "insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState()", "found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists spos", "fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\", dest #print \"Entered state ID", "== '<s>': continue nxt = fst.AddState() if isyms: sym = isyms.Find(c) if sym", "isinstance(infile, file): infile = file(infile) inclass = None for spam in infile: spam", "backoff state # For M in 1 to N-1: # For each M-gram", "\"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym == -1: #", "newstate = False if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest =", "in strs: prev = start for c in s: nxt = fst.AddState() isym", "if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon for", "exists spos = 1 while tuple(ng.words[spos:]) not in sidtab: spos += 1 if", "src = sidtab[tuple(ng.words[:n-1])] # Find longest suffix N-gram that exists spos = 1", "dest)) #print \"Added %d-gram arc %d => %d %s/%.4f\" % (m+1, src, dest,", "an arc from state q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create an", "in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt)", "# guaranteed to be zero (we hope) for m in range(lm.get_size() - 1):", "a text string to an FST. 
\"\"\" fst = fstclass() start = fst.AddState()", "in sidtab: spos += 1 if spos == n: raise RuntimeError, \"Unable to", "with weight P(w(1,N)) # Table holding M-gram to state mappings sidtab = {}", "fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst): \"\"\" Convert a text", "def lm_eval(lm, sent): sent = [x for x in sent.split() if not x.startswith('++')]", "either be composed with the input, or pre-composed with the language model. In", "= openfst.StdVectorFst() if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st", "def build_lmfst(lm, use_phi=False): \"\"\" Build an FST recognizer from an N-gram backoff language", "'</s>': w = [w,] for c in w: csym = insym.AddSymbol(c) for w,", "\"\"\" tempdir = tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor =", "self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build an FST from the", "arcs for all M-grams in the language model, where M<N. \"\"\" for mg", "self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms)", "-1: #print \"Warning, unknown word\", c continue else: sym = symtab.AddSymbol(c) #print prev,", "in lm.mgrams(0): wsym = symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this:", "Build a character-to-word FST based on the symbol table of lmfst. \"\"\" insym", "omitstart=True): \"\"\" Convert a list of words, or a string of whitespace-separated tokens,", "Sphinx probability definition file. 
This transducer maps words to classes, and can either", "openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based on", "continue # Use a single symbol for end-of-sentence if w == '</s>': w", "fst.SetStart(start) for s in strs: prev = start for c in s: nxt", "end-of-sentence if w == '</s>': w = [w,] for c in w: csym", "symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt,", "wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def", "fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d => %d %s/%.4f\"", "a class in this probability definition. \"\"\" self.classes[name][word] = prob def write(self, outfile):", "in sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] =", "Probability definition file used for Sphinx class language models. 
\"\"\" def __init__(self, infile=None):", "# print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -=", "for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram", "csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev =", "next)) prev = next # And an epsilon arc to the final state", "if ng.words[n-1] == '<s>': # non-event continue if '</s>' in ng.words[0:n-1]: continue for", "in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm,", "ll -= a.weight.Value() st = a.nextstate return ll def lm_eval(lm, sent): sent =", "fst.SetStart(st) fst.SetFinal(st, 0) for word, label in symtab: if label == openfst.epsilon: continue", "sidtab = {} fst.AddState() # guaranteed to be zero (we hope) for m", "\"\" rv = os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\" % (acceptor,", "self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not isinstance(src, int): src", "classname def add_class(self, name): \"\"\" Add a class to this probability definition. \"\"\"", "== None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def", "= osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst,", "this returns the lazy composition of the class definition transducer and the language", "o.Start() ll = 0 while st != -1 and o.NumArcs(st): a = o.GetArc(st,", "classes in a Sphinx probability definition file. 
This transducer maps words to classes,", "fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt,", "spam.startswith(';'): continue if spam == \"\": continue if inclass: parts = spam.split() if", "probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst", "state q(1,M) # Create an arc from state q(1,M-1) to q(1,M) with weight", "openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition file used for Sphinx", "an epsilon:word arc to the first state of this word prev = fst.AddState()", "parts[0] == \"END\" and parts[1] == classname: inclass = None else: prob =", "language model. To obtain the full language model, create a VectorFst from it", "if isinstance(txt, str): txt = txt.split() for c in txt: if omitstart and", "classname = spam.split() self.add_class(classname) inclass = classname def add_class(self, name): \"\"\" Add a", "# skip mgrams ending in OOV if m > 0 and mg.words[0] ==", "% (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm, use_phi=False): \"\"\" Build an FST", "symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st,", "in backoff state elif tuple(mg.words[0:m]) not in sidtab: continue # this means it", "m in range(lm.get_size() - 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab,", "# # Create a backoff state # For M in 1 to N-1:", "where N=N (the order of the model, that is). 
\"\"\" for ng in", "for w, wsym in outsym: if wsym == 0: continue wsym = outsym.Find(w)", "prob in self.classes[c]: outfile.write(\"%s %g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\")", "start = fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\")", "phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o =", "openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms)", "q(1,M) to q(2,M) with weight bowt(w(1,M-1)) # For each N-gram w(1,N): # Create", "== -1: continue # skip mgrams ending in OOV if m > 0", "dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent): sentfst =", "if wsym == -1: # OOV continue if ng.words[n-1] == '<s>': # non-event", "output symbols as required. This is meant to behave somewhat like the Dot", "print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() / math.log(10) ll -= a.weight.Value()", "lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST", "return ll def lm_eval(lm, sent): sent = [x for x in sent.split() if", "if w == '</s>': w = [w,] for c in w: csym =", "the resulting transducer to its input to obtain an equivalent non-class-based model. \"\"\"", "0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms)", "build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from a class-based language model. 
By", "for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in self.classes[c]:", "= [w,] for c in w: csym = insym.AddSymbol(c) for w, wsym in", "mg.words[m] == '<s>': # <s> is a non-event if m == 0: #", "sent = [x for x in sent.split() if not x.startswith('++')] ll = 0", "the language model, where M<N. \"\"\" for mg in lm.mgrams(m): wsym = symtab.Find(mg.words[m])", "the lazy composition of the class definition transducer and the language model. To", "label == openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for c in probdef.classes:", "for c in probdef.classes: clabel = symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel", "an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': # only one", "backoff arc to the suffix M-1-gram # Note taht if mg.log_bowt == 0", "infile: spam = spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam == \"\":", "word\", c continue else: sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev, sym,", "in OOV if m > 0 and mg.words[0] == '</s>': continue # skip", "Build an FST from a class-based language model. By default this returns the", "state\", dest #print \"Entered state ID mapping (</s>,) =>\", dest else: final =", "\"\"\" Add states and arcs for all N-grams in the language model, where", "tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue # Not a 1-gram, no", "0 while st != -1 and o.NumArcs(st): a = o.GetArc(st, 0) # print", "int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms", "= tempfile.mkdtemp() fstfile = os.path.join(tempdir, \"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else:", "definition. 
\"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\" Write out probability definition", "fst.AddState() isym = isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt nxt", "\"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys import os", "infile=None): self.classes = {} if infile != None: self.read(infile) def read(self, infile): \"\"\"", "SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if isyms: symtab = isyms else:", "lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo ll += prob return ll if", "q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table holding M-gram to state mappings", "= prob def write(self, outfile): \"\"\" Write out probability definition to a file.", "self.add_class(classname) inclass = classname def add_class(self, name): \"\"\" Add a class to this", "Connect and arc-sort the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\"", "openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self,", "add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort the resulting FST openfst.Connect(fst)", "continue wsym = outsym.Find(w) # Add an epsilon:word arc to the first state", "an equivalent non-class-based model. 
\"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst", "M-1-gram # Note taht if mg.log_bowt == 0 it's particularly important to do", "prob) else: if spam.startswith('LMCLASS'): foo, classname = spam.split() self.add_class(classname) inclass = classname def", "# Use a single symbol for end-of-sentence if w == '</s>': w =", "== '</s>': # only one final state is allowed final = True newstate", "Create an arc from state q(1,M-1) to q(1,M) with weight P(w(1,M)) # Create", "= openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start)", "equivalent non-class-based model. \"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst =", "\"Unable to find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym,", "class definition transducer and the language model. To obtain the full language model,", "fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import", "infile = file(infile) inclass = None for spam in infile: spam = spam.strip()", "dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d", "None: self.read(infile) def read(self, infile): \"\"\" Read probability definition from a file. 
\"\"\"", "sent): sent = [x for x in sent.split() if not x.startswith('++')] ll =", "symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab)", "openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for c in probdef.classes: clabel =", "for w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build an", "find suffix N-gram for\", ng.wids dest = sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest))", "in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word, prob in self.classes[c]: outfile.write(\"%s %g\\n\"", "1 to N-1: # For each M-gram w(1,M): # Create a state q(1,M)", "fst = fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else:", "to a class in this probability definition. \"\"\" self.classes[name][word] = prob def write(self,", "st) fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST", "fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print \"Final state\", dest #print \"Entered state", "os.unlink(fstfile) os.rmdir(tempdir) return rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list", "isyms.AddSymbol(c) fst.AddArc(prev, isym, 0, 0, nxt) prev = nxt nxt = fst.AddState() osym", "sidtab: spos += 1 if spos == n: raise RuntimeError, \"Unable to find", "insym.AddSymbol(c) for w, wsym in outsym: if wsym == 0: continue wsym =", "continue # skip mgrams ending in OOV if m > 0 and mg.words[0]", "== '</s>': w = [w,] for c in w: csym = insym.AddSymbol(c) for", "1) st = o.Start() ll = 0 while st != -1 and o.NumArcs(st):", "= symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab)", "final = fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym: if wsym 
==", "w: csym = insym.AddSymbol(c) for w, wsym in outsym: if wsym == 0:", "this probability definition. \"\"\" self.classes[name][word] = prob def write(self, outfile): \"\"\" Write out", "1 while tuple(ng.words[spos:]) not in sidtab: spos += 1 if spos == n:", "isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st)", "suffix N-gram that exists spos = 1 while tuple(ng.words[spos:]) not in sidtab: spos", "fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym: if wsym", "if m == 0: # The destination state will be the initial state", "nxt = fst.AddState() if isyms: sym = isyms.Find(c) if sym == -1: #print", "def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\")", "= a.nextstate return ll def lm_eval(lm, sent): sent = [x for x in", "this: # # Create a backoff state # For M in 1 to", "weight bowt(w(1,M-1)) # For each N-gram w(1,N): # Create an arc from state", "mgrams ending in OOV if m > 0 and mg.words[0] == '</s>': continue", ">= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self,", "= spam.strip() if spam.startswith('#') or spam.startswith(';'): continue if spam == \"\": continue if", "osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms)", "*args) def SetFinal(self, state, weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self,", "file. 
This transducer maps words to classes, and can either be composed with", "state\", dest else: fst.AddArc(src, openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d", "w(1,N): # Create an arc from state q(1,N-1) to q(2,N) with weight P(w(1,N))", "ng.words[0:n-1]: continue for w in ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1:", "!= None: self.read(infile) def read(self, infile): \"\"\" Read probability definition from a file.", "in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef, isyms=None): \"\"\" Build an FST from", "outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c) for word,", "word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob), st) fst.SetOutputSymbols(symtab)", "state q(1,N-1) to q(2,N) with weight P(w(1,N)) # Table holding M-gram to state", "-1 and o.NumArcs(st): a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel),", "the mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\", tuple(mg.words),", "nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst):", "AddArc(self, src, isym, osym, weight, dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym)", "mappings sidtab = {} fst.AddState() # guaranteed to be zero (we hope) for", "symtab.AddSymbol(c) for word, prob in probdef.classes[c].iteritems(): wlabel = symtab.AddSymbol(word) fst.AddArc(st, wlabel, clabel, -math.log(prob),", "in outsym: if wsym == 0: continue # Use a single symbol for", "sym, nxt fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab)", "ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest): if not", "= 
sidtab[tuple(ng.words[spos:])] fst.AddArc(src, openfst.StdArc(wsym, wsym, -ng.log_prob, dest)) #print \"Adding %d-gram arc %d =>", "tokens, to a sentence FST. \"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start)", "openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None:", "0 # backoff state elif tuple(mg.words[1:]) in sidtab: bo_state = sidtab[tuple(mg.words[1:])] else: continue", "\"\"\" Build an FST recognizer from an N-gram backoff language model. \"\"\" fst", "lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 =", "True dest = fst.AddState() if mg.words[m] == '<s>': # <s> is a non-event", "next # And an epsilon arc to the final state fst.AddArc(prev, openfst.StdArc(0, 0,", "def AddArc(self, src, isym, osym, weight, dest): if not isinstance(isym, int): isym =", "of strings. \"\"\" fst = fstclass() isyms = openfst.SymbolTable(\"chars\") osyms = openfst.SymbolTable(\"words\") isyms.AddSymbol(\"&epsilon;\")", "openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent,", "isyms: symtab = isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if", "states, input and output symbols as required. This is meant to behave somewhat", "lm, m, sidtab, bo_label) add_ngram_arcs(fst, symtab, lm, lm.get_size(), sidtab) # Connect and arc-sort", "txt.split() for c in txt: if omitstart and c == '<s>': continue nxt", "nxt) prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def str2fst(txt, fstclass=openfst.StdVectorFst):", "openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll = 0 while st !=", "\"\"\" Build an FST from a class-based language model. 
By default this returns", "lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2", "opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c =", "prob / math.log(10), bo ll += prob return ll if __name__ == '__main__':", "required. This is meant to behave somewhat like the Dot language. \"\"\" def", "sent[i] == '<s>': continue prob = lm.prob(sent[i::-1]) #print sent[i::-1], prob / math.log(10), bo", "self.classes = {} if infile != None: self.read(infile) def read(self, infile): \"\"\" Read", "definition from a file. \"\"\" if not isinstance(infile, file): infile = file(infile) inclass", "symbol table of lmfst. \"\"\" insym = openfst.SymbolTable(\"letters\") insym.AddSymbol(\"&epsilon;\") outsym = lmfst.InputSymbols() fst", "'<s>': # non-event continue if '</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]:", "prob return ll if __name__ == '__main__': lmf, fstf = sys.argv[1:] lm =", "def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from a class-based language model.", "sentence FST. 
\"\"\" fst = fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab", "if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0)", "SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms", "def add_ngram_arcs(fst, symtab, lm, n, sidtab): \"\"\" Add states and arcs for all", "for c in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym,", "state, weight=0): if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def", "unknown word\", c continue else: sym = symtab.AddSymbol(c) #print prev, sym, nxt fst.AddArc(prev,", "# non-event continue if '</s>' in ng.words[0:n-1]: continue for w in ng.words[:n-1]: #", "if symtab.Find(w) == -1: #print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] #", "= o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value() /", "if not final: # Create a backoff arc to the suffix M-1-gram #", "Use a single symbol for end-of-sentence if w == '</s>': w = [w,]", "import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states,", "a class-based language model. By default this returns the lazy composition of the", "spos = 1 while tuple(ng.words[spos:]) not in sidtab: spos += 1 if spos", "0: bo_state = 0 # backoff state elif tuple(mg.words[1:]) in sidtab: bo_state =", "sent.split() if not x.startswith('++')] ll = 0 for i in xrange(len(sent)): if sent[i]", "for Sphinx class language models. 
\"\"\" def __init__(self, infile=None): self.classes = {} if", "arc %d => %d %s/%.4f\" % (n, src, dest, ng.words[n-1], -ng.log_prob) def build_lmfst(lm,", "fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt:", "backoff arc %d => %d %.4f\" % (dest, bo_state, -mg.log_bowt) def add_ngram_arcs(fst, symtab,", "isinstance(txt, str): txt = txt.split() for c in txt: if omitstart and c", "n, sidtab): \"\"\" Add states and arcs for all N-grams in the language", "nxt) prev = nxt nxt = fst.AddState() osym = osyms.AddSymbol(s) fst.AddArc(prev, 0, osym,", "= 0 while st != -1 and o.NumArcs(st): a = o.GetArc(st, 0) #", "openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent): sentfst", "= lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE)", "if mg.words[m] == '</s>': # only one final state is allowed final =", "isyms.Find(c) if sym == -1: #print \"Warning, unknown word\", c continue else: sym", "transducer and the language model. To obtain the full language model, create a", "the resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition file", "as # Sphinx-III \"\"\" FST utility functions \"\"\" __author__ = \"<NAME> <<EMAIL>>\" __version__", "(the order of the model, that is). \"\"\" for ng in lm.mgrams(n-1): wsym", "isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\") if osyms == None: osyms =", "used for Sphinx class language models. 
\"\"\" def __init__(self, infile=None): self.classes = {}", "dest = sidtab[('</s>',)] else: dest = fst.AddState() fst.SetFinal(dest, 0) sidtab[('</s>',)] = dest #print", "file): infile = file(infile) inclass = None for spam in infile: spam =", "while dest >= self.NumStates(): self.AddState() openfst.StdVectorFst.AddArc(self, src, isym, osym, weight, dest) def Write(self,", "openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\") self.ssyms = ssyms self.SetInputSymbols(isyms) self.SetOutputSymbols(osyms) self.SetStart(self.AddState()) def AddArc(self, src, isym, osym,", "src, isym, osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self,", "= \"\" rv = os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\" %", "fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w, wsym in outsym: if", "state will be the initial state fst.SetStart(dest) #print \"Initial state\", dest else: fst.AddArc(src,", "mapping if needed sidtab[tuple(mg.words)] = dest #print \"Entered state ID mapping\", tuple(mg.words), \"=>\",", "= openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst,", "import sys import os import tempfile import openfst import sphinxbase import subprocess class", "model. \"\"\" if not isinstance(probdef, SphinxProbdef): probdef = SphinxProbdef(probdef) fst = openfst.StdVectorFst() if", "model. \"\"\" fst = openfst.StdVectorFst() symtab = openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi:", "fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for a set of strings. 
\"\"\"", "a backoff state # For M in 1 to N-1: # For each", "int): src = self.ssyms.AddSymbol(src) if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src", "== -1: #print w, \"not found\" continue src = sidtab[tuple(ng.words[:n-1])] # Find longest", "spam.split() self.add_class(classname) inclass = classname def add_class(self, name): \"\"\" Add a class to", "osym, weight, dest) def Write(self, *args): openfst.StdVectorFst.SetInputSymbols(self, self.isyms) openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) openfst.StdVectorFst.Write(self, *args) def", "\"Added %d-gram arc %d => %d %s/%.4f\" % (m+1, src, dest, #mg.words[m], -mg.log_prob)", "-1: # OOV continue if ng.words[n-1] == '<s>': # non-event continue if '</s>'", "prob): \"\"\" Add a word to a class in this probability definition. \"\"\"", "</s> if m == 0: src = 0 # 1-grams start in backoff", "c in w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0,", "self.osyms) openfst.StdVectorFst.Write(self, *args) def SetFinal(self, state, weight=0): if not isinstance(state, int): state =", "= openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final, 0) for w,", "normalize(self): \"\"\" Normalize probabilities. 
\"\"\" for c in self.classes: t = sum(self.classes[c].itervalues()) if", "fstclass() start = fst.AddState() fst.SetStart(start) if isyms: symtab = isyms else: symtab =", "use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi else: bo_label = epsilon for ug", "subprocess class AutoFst(openfst.StdVectorFst): \"\"\" FST class which automatically adds states, input and output", "w = [w,] for c in w: csym = insym.Find(c) next = fst.AddState()", "openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build a character-to-word FST based", "import os import tempfile import openfst import sphinxbase import subprocess class AutoFst(openfst.StdVectorFst): \"\"\"", "= fst.AddState() fst.AddArc(start, openfst.StdArc(0, wsym, 0, prev)) # Use a single symbol for", "if spos == n: raise RuntimeError, \"Unable to find suffix N-gram for\", ng.wids", "inclass = classname def add_class(self, name): \"\"\" Add a class to this probability", "openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def add_mgram_states(fst, symtab,", "if len(parts) == 2 \\ and parts[0] == \"END\" and parts[1] == classname:", "For each N-gram w(1,N): # Create an arc from state q(1,N-1) to q(2,N)", "isinstance(outfile, file): outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\" % c)", "None: osyms = openfst.SymbolTable(\"outputs\") osyms.AddSymbol(\"&epsilon;\") if ssyms == None: ssyms = openfst.SymbolTable(\"states\") ssyms.AddSymbol(\"__START__\")", "openfst.StdArc(0, 0, 0, final)) fst.SetInputSymbols(insym) fst.SetOutputSymbols(outsym) return fst def fst2pdf(fst, outfile, acceptor=False): \"\"\"", "if spam.startswith('#') or spam.startswith(';'): continue if spam == \"\": continue if inclass: parts", "- 1): add_mgram_states(fst, symtab, lm, m, sidtab, bo_label) add_ngram_arcs(fst, 
symtab, lm, lm.get_size(), sidtab)", "In the latter case you can project the resulting transducer to its input", "int): isym = self.isyms.AddSymbol(isym) if not isinstance(osym, int): osym = self.osyms.AddSymbol(osym) if not", "prob def write(self, outfile): \"\"\" Write out probability definition to a file. \"\"\"", "M-gram w(1,M): # Create a state q(1,M) # Create an arc from state", "openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c = openfst.StdComposeFst(sentfst, lmfst) o", "full language model, create a VectorFst from it and project it to its", "openfst.StdArc(csym, 0, 0, next)) prev = next # And an epsilon arc to", "acceptor: acceptor = \"--acceptor\" else: acceptor = \"\" rv = os.system(\"fstdraw %s '%s'", "outfile): \"\"\" Write out probability definition to a file. \"\"\" if not isinstance(outfile,", "= lmfst.InputSymbols() fst = openfst.StdVectorFst() start = fst.AddState() fst.SetStart(start) final = fst.AddState() fst.SetFinal(final,", "m == 0: # The destination state will be the initial state fst.SetStart(dest)", "symtab.AddSymbol(ug.words[0]) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) # The algorithm goes like this: # # Create a", "use_phi=False): \"\"\" Build an FST recognizer from an N-gram backoff language model. 
\"\"\"", "= openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") prev = start if isinstance(txt, str): txt = txt.split() for", "openfst.StdArc(wsym, wsym, -mg.log_prob, dest)) #print \"Added %d-gram arc %d => %d %s/%.4f\" %", "if isyms: sym = isyms.Find(c) if sym == -1: #print \"Warning, unknown word\",", "= openfst.SymbolTable(\"words\") epsilon = symtab.AddSymbol(\"&epsilon;\") if use_phi: phi = symtab.AddSymbol(\"&phi;\") bo_label = phi", "classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst): \"\"\" Build", "state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self,", "insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym, 0, 0, next)) prev = next #", "openfst.StdPhiComposeOptions() opts.matcher1 = openfst.StdPhiMatcher(sentfst, openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst,", "+= prob return ll if __name__ == '__main__': lmf, fstf = sys.argv[1:] lm", "== openfst.epsilon: continue fst.AddArc(st, label, label, 0, st) for c in probdef.classes: clabel", "it has an OOV else: src = sidtab[tuple(mg.words[0:m])] if mg.words[m] == '</s>': #", "or a string of whitespace-separated tokens, to a sentence FST. 
\"\"\" fst =", "prev = nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\"", "if not isinstance(outfile, file): outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS %s\\n\"", "fst = fstclass() start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev =", "dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst, sent): sentfst = sent2fst(sent, openfst.StdVectorFst,", "self.SetStart(self.AddState()) def AddArc(self, src, isym, osym, weight, dest): if not isinstance(isym, int): isym", "= isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms = osyms openfst.StdVectorFst.SetOutputSymbols(self, self.osyms) def", "= openfst.StdComposeFst(sentfst, lmfst) o = openfst.StdVectorFst() openfst.ShortestPath(c, o, 1) st = o.Start() ll", "isyms=None, omitstart=True): \"\"\" Convert a list of words, or a string of whitespace-separated", "model, that is). 
\"\"\" for ng in lm.mgrams(n-1): wsym = symtab.Find(ng.words[n-1]) if wsym", "csym = insym.AddSymbol(c) for w, wsym in outsym: if wsym == 0: continue", "__init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms = openfst.SymbolTable(\"inputs\") isyms.AddSymbol(\"&epsilon;\")", "if not isinstance(dest, int): dest = self.ssyms.AddSymbol(dest) while src >= self.NumStates(): self.AddState() while", "rv def sent2fst(txt, fstclass=openfst.StdVectorFst, isyms=None, omitstart=True): \"\"\" Convert a list of words, or", "\"\"\" if not isinstance(outfile, file): outfile = file(outfile) for c in self.classes: outfile.write(\"LMCLASS", "q(2,N) with weight P(w(1,N)) # Table holding M-gram to state mappings sidtab =", "rv = os.system(\"fstdraw %s '%s' | dot -Tpdf > '%s'\" % (acceptor, fstfile,", "for w in ng.words[:n-1]: # skip OOVs if symtab.Find(w) == -1: #print w,", "nxt fst.SetFinal(nxt, 0) fst.SetInputSymbols(symtab) fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a", "is meant to behave somewhat like the Dot language. 
\"\"\" def __init__(self, isyms=None,", "= start for c in txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev,", "def SetInputSymbols(self, isyms): self.isyms = isyms openfst.StdVectorFst.SetInputSymbols(self, self.isyms) def SetOutputSymbols(self, osyms): self.osyms =", "txt: nxt = fst.AddState() sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev", "suffix M-gram fst.AddArc(dest, openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d =>", "= build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def", "fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c in txt: nxt", "math.log(10), bo ll += prob return ll if __name__ == '__main__': lmf, fstf", "fst.AddState() if isyms: sym = isyms.Find(c) if sym == -1: #print \"Warning, unknown", "its input to obtain an equivalent non-class-based model. 
\"\"\" if not isinstance(probdef, SphinxProbdef):", "txt = txt.split() for c in txt: if omitstart and c == '<s>':", "= True newstate = False if ('</s>',) in sidtab: dest = sidtab[('</s>',)] else:", "fst.SetOutputSymbols(symtab) fst.SetInputSymbols(symtab) return fst def build_class_lmfst(lm, probdef, use_phi=False): \"\"\" Build an FST from", "# The algorithm goes like this: # # Create a backoff state #", "0: continue wsym = outsym.Find(w) # Add an epsilon:word arc to the first", "isyms=None): \"\"\" Build an FST from the classes in a Sphinx probability definition", "= fstclass() openfst.Determinize(fst, dfst) openfst.RmEpsilon(dfst) dfst.SetInputSymbols(isyms) dfst.SetOutputSymbols(osyms) return dfst import math def lmfst_eval(lmfst,", "src, isym, osym, weight, dest): if not isinstance(isym, int): isym = self.isyms.AddSymbol(isym) if", "prob = 1.0 if len(parts) > 1: prob = float(parts[1]) self.add_class_word(inclass, parts[0], prob)", "%g\\n\" % (word, prob)) outfile.write(\"END %s\\n\" % c) outfile.write(\"\\n\") def normalize(self): \"\"\" Normalize", "if not isinstance(state, int): state = self.ssyms.AddSymbol(state) openfst.StdVectorFst.SetFinal(self, state, weight) def SetInputSymbols(self, isyms):", "if m > 0 and mg.words[0] == '</s>': continue # skip >1-grams starting", "osyms.AddSymbol(s) fst.AddArc(prev, 0, osym, 0, nxt) fst.SetFinal(nxt, 0) dfst = fstclass() openfst.Determinize(fst, dfst)", "openfst.StdVectorFst, lmfst.InputSymbols()) phi = lmfst.InputSymbols().Find(\"&phi;\") if phi != -1: opts = openfst.StdPhiComposeOptions() opts.matcher1", "Write out probability definition to a file. 
\"\"\" if not isinstance(outfile, file): outfile", "continue if ng.words[n-1] == '<s>': # non-event continue if '</s>' in ng.words[0:n-1]: continue", "mapping\", tuple(mg.words), \"=>\", dest if not final: # Create a backoff arc to", "dest if not final: # Create a backoff arc to the suffix M-1-gram", "if spam == \"\": continue if inclass: parts = spam.split() if len(parts) ==", "\"<NAME> <<EMAIL>>\" __version__ = \"$Revision $\" import sys import os import tempfile import", "resulting FST openfst.Connect(fst) openfst.ArcSortInput(fst) return fst class SphinxProbdef(object): \"\"\" Probability definition file used", "openfst.MATCH_NONE) opts.matcher2 = openfst.StdPhiMatcher(lmfst, openfst.MATCH_INPUT, phi) c = openfst.StdComposeFst(sentfst, lmfst, opts) else: c", "a = o.GetArc(st, 0) # print o.InputSymbols().Find(a.ilabel), \\ # o.OutputSymbols().Find(a.olabel), \\ # -a.weight.Value()", "\"output.fst\") fst.Write(fstfile) if acceptor: acceptor = \"--acceptor\" else: acceptor = \"\" rv =", "label in symtab: if label == openfst.epsilon: continue fst.AddArc(st, label, label, 0, st)", "\"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms =", "language. 
\"\"\" def __init__(self, isyms=None, osyms=None, ssyms=None): openfst.StdVectorFst.__init__(self) if isyms == None: isyms", "sum(self.classes[c].itervalues()) if t != 0: for w in self.classes[c]: self.classes[c][w] /= t def", "= isyms else: symtab = openfst.SymbolTable(\"words\") symtab.AddSymbol(\"&epsilon;\") st = fst.AddState() fst.SetStart(st) fst.SetFinal(st, 0)", "word, label in symtab: if label == openfst.epsilon: continue fst.AddArc(st, label, label, 0,", "build_lmfst(lm, use_phi) classfst = build_classfst(probdef, lmfst.InputSymbols()) openfst.ArcSortInput(lmfst) openfst.ArcSortInput(classfst) return openfst.StdComposeFst(classfst, lmfst) def build_dictfst(lmfst):", "sym = symtab.AddSymbol(c) fst.AddArc(prev, sym, sym, 0, nxt) prev = nxt fst.SetFinal(nxt, 0)", "file used for Sphinx class language models. \"\"\" def __init__(self, infile=None): self.classes =", "to this probability definition. \"\"\" self.classes[name] = {} def add_class_word(self, name, word, prob):", "[w,] for c in w: csym = insym.Find(c) next = fst.AddState() fst.AddArc(prev, openfst.StdArc(csym,", "if t != 0: for w in self.classes[c]: self.classes[c][w] /= t def build_classfst(probdef,", "start = fst.AddState() fst.SetStart(start) symtab = openfst.SymbolTable(\"chars\") symtab.AddSymbol(\"&epsilon;\") prev = start for c", "openfst.StdArc(bo_label, bo_label, -mg.log_bowt, bo_state)) #print \"Adding backoff arc %d => %d %.4f\" %", "fst.SetOutputSymbols(symtab) return fst def strset2fst(strs, fstclass=openfst.StdVectorFst): \"\"\" Build a dictionary lookup FST for", "name, word, prob): \"\"\" Add a word to a class in this probability" ]
[ "# coding=utf-8 def add(a, b): return a + b c = add(288, 500)", "#!/usr/bin/env python # coding=utf-8 def add(a, b): return a + b c =", "python # coding=utf-8 def add(a, b): return a + b c = add(288," ]
[ "typing import List, Union from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"]", "Defaults are conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord", "False install_timelord = False check_resource_usage = False job_timeout = 30 custom_vars: List[str] =", "Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord = False check_resource_usage = False job_timeout", "oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. parallel: Union[bool, int,", "= False install_timelord = False check_resource_usage = False job_timeout = 30 custom_vars: List[str]", "check_resource_usage = False job_timeout = 30 custom_vars: List[str] = [] os_skip: List[Oses] =", "int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord = False check_resource_usage = False", "= False checkout_blocks_and_plots = False install_timelord = False check_resource_usage = False job_timeout =", "List, Union from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github", "Union from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions", "config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. parallel: Union[bool,", "import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config. oses:", "annotations from typing import List, Union from typing_extensions import Literal Oses = Literal[\"macos\",", "Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config. oses: List[Oses] =", "import List, Union from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] #", "List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. 
parallel: Union[bool, int, Literal[\"auto\"]]", "= [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. parallel: Union[bool, int, Literal[\"auto\"]] =", "\"ubuntu\", \"windows\"] # Defaults are conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots", "from typing import List, Union from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\",", "Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord = False check_resource_usage =", "Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\",", "# Github actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults", "typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config.", "install_timelord = False check_resource_usage = False job_timeout = 30 custom_vars: List[str] = []", "= False check_resource_usage = False job_timeout = 30 custom_vars: List[str] = [] os_skip:", "are conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord =", "= False job_timeout = 30 custom_vars: List[str] = [] os_skip: List[Oses] = []", "Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config. oses: List[Oses]", "import annotations from typing import List, Union from typing_extensions import Literal Oses =", "Github actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are", "= Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template config. oses: List[Oses] = [\"macos\",", "False check_resource_usage = False job_timeout = 30 custom_vars: List[str] = [] os_skip: List[Oses]", "<gh_stars>0 from __future__ import annotations from typing import List, Union from typing_extensions import", "# Defaults are conservative. 
parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False", "checkout_blocks_and_plots = False install_timelord = False check_resource_usage = False job_timeout = 30 custom_vars:", "\"windows\"] # Github actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] #", "actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative.", "from __future__ import annotations from typing import List, Union from typing_extensions import Literal", "from typing_extensions import Literal Oses = Literal[\"macos\", \"ubuntu\", \"windows\"] # Github actions template", "parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord = False check_resource_usage", "False checkout_blocks_and_plots = False install_timelord = False check_resource_usage = False job_timeout = 30", "__future__ import annotations from typing import List, Union from typing_extensions import Literal Oses", "[\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False", "template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"] # Defaults are conservative. parallel:", "\"ubuntu\", \"windows\"] # Github actions template config. oses: List[Oses] = [\"macos\", \"ubuntu\", \"windows\"]", "\"windows\"] # Defaults are conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots =", "conservative. parallel: Union[bool, int, Literal[\"auto\"]] = False checkout_blocks_and_plots = False install_timelord = False" ]
[ "= preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss", "mx import numpy as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self):", "def update(self, labels, preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]),", "preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls +=", "cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst +=", "super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls", "cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels,", "preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss =", "np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self,", "np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss')", "self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def", "__init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32')", "self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): 
super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds):", "def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss)", "__init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy()", "np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def", "pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS", "cls += config.EPS cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric +=", "label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss =", "from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels,", "numpy as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss')", "mxnet as mx import numpy as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric):", "self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss =", "= -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0]", "update(self, labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric", "preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric += bbox_loss", "update(self, labels, preds): pred_cls = preds[0].asnumpy() label = 
labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label]", "SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label", "as mx import numpy as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def", "cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = -1 * np.log(cls) cls_loss", "label] cls += config.EPS cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric", "+= label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss", "super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss", "preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric += bbox_loss self.num_inst += label.shape[0]", "label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = -1", "config.EPS cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst", "self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls =", "= np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric,", "class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy()", "import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): 
pred_cls", "as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def", "bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric += bbox_loss self.num_inst", "cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self):", "= labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = -1 *", "def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy() label =", "def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self, labels, preds): bbox_loss = preds[0].asnumpy() label =", "import numpy as np from rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric,", "* np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric):", "rcnn.config import config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds):", "class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy()", "= pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = -1 * np.log(cls) cls_loss =", "+= config.EPS cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss", "config class LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls =", "labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = 
-1 * np.log(cls)", "= preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric += bbox_loss self.num_inst +=", "import mxnet as mx import numpy as np from rcnn.config import config class", "pred_cls[np.arange(label.shape[0]), label] cls += config.EPS cls_loss = -1 * np.log(cls) cls_loss = np.sum(cls_loss)", "labels, preds): pred_cls = preds[0].asnumpy() label = labels[0].asnumpy().astype('int32') cls = pred_cls[np.arange(label.shape[0]), label] cls", "LogLossMetric(mx.metric.EvalMetric): def __init__(self): super(LogLossMetric, self).__init__('LogLoss') def update(self, labels, preds): pred_cls = preds[0].asnumpy() label", "labels, preds): bbox_loss = preds[0].asnumpy() label = labels[0].asnumpy() bbox_loss = np.sum(bbox_loss) self.sum_metric +=", "-1 * np.log(cls) cls_loss = np.sum(cls_loss) self.sum_metric += cls_loss self.num_inst += label.shape[0] class", "+= cls_loss self.num_inst += label.shape[0] class SmoothL1LossMetric(mx.metric.EvalMetric): def __init__(self): super(SmoothL1LossMetric, self).__init__('SmoothL1Loss') def update(self," ]
[ "# Client to add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0:", "post in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url", "timelines): ts = [] for timeline in timelines: posts = timeline['posts'] for post", "url = base_url + 'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json()) # Client", "def evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts =", "<reponame>mlatcl/fbp-vs-oop import requests from mblogger.record_types import * base_url = 'http://127.0.0.1:5000/' class App(): def", "self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings,", "data for main program def get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers)", "# Parses followers def _parse_followers(self, followers): fs = [] for follower in followers:", "'%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if __name__ == \"__main__\": app", "json={}) generated_posts = response.json() return generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record):", "= response.json() return followers # Client to get list of followings def _get_followings(self):", "import * base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers() followings", "_parse_timelines(self, timelines): ts = [] for timeline in timelines: posts = timeline['posts'] for", "_add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows = [] for follow in follow_requests:", "self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = 
self._parse_generated_posts(generated_posts) return followers, followings,", "= [] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p =", "response = requests.post(url, json={}) generated_posts = response.json() return generated_posts def add_data(self, followings, followers,", "generated_posts # Parses followers def _parse_followers(self, followers): fs = [] for follower in", "+ 'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json() return followings # Client", "> 0: follows = [] for follow in follow_requests: f = follow.to_dict() follows.append(f)", "ts = [] for timeline in timelines: posts = timeline['posts'] for post in", "[] for timeline in timelines: posts = timeline['posts'] for post in posts: post['timestamp']", "in input_record: req = {} req['user_id'] = ir.user_id req['length'] = ir.length url =", "evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts()", "len(follow_requests) > 0: follows = [] for follow in follow_requests: f = follow.to_dict()", "follows = [] for follow in follow_requests: f = follow.to_dict() follows.append(f) url =", "url = base_url + 'author-request/follows' response = requests.post(url, json=follows) # print(response.json()) # Client", "response.json() return generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record)", "self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) #", "if len(posts) > 0: ps = [] for post in posts: p =", "url = base_url + 'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json() return", 
"timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated posts", "= ir.user_id req['length'] = ir.length url = base_url + 'post-request/generate_post' response = requests.post(url,", "len(posts) > 0: ps = [] for post in posts: p = post.to_dict()", "ts.append(t) return ts # Parses generated posts def _parse_generated_posts(self, generated_posts): ps = []", "def _get_followers(self): url = base_url + 'author-request/list_followers' response = requests.post(url, json={}) followers =", "url = base_url + 'author-request/list_followers' response = requests.post(url, json={}) followers = response.json() return", "+ 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json() return generated_posts def add_data(self,", "# print(response.json()) # Client to add a generated post def _add_generated_post(self, input_record): for", "Parses generated posts def _parse_generated_posts(self, generated_posts): ps = [] for post in generated_posts:", "= requests.post(url, json=ps) # print(response.json()) # Client to add a generated post def", "Parses followers def _parse_followers(self, followers): fs = [] for follower in followers: fls", "from mblogger.record_types import * base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers =", "[] for post in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url", "%H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if __name__ == \"__main__\": app =", "add a generated post def _add_generated_post(self, input_record): for ir in input_record: req =", "in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return", "base_url + 'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json()) # Client to add", "followings def _get_followings(self): url = base_url + 'author-request/list_followings' response = 
requests.post(url, json={}) followings", "0: follows = [] for follow in follow_requests: f = follow.to_dict() follows.append(f) url", "= str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response = requests.post(url, json=ps) #", "self._add_generated_post(input_record) # Client to add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) >", "post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return", "= FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs # Parses followings def _parse_followings(self,", "timelines, generated_posts) # Client to get list of followers def _get_followers(self): url =", "followers def _parse_followers(self, followers): fs = [] for follower in followers: fls =", "follow_requests: f = follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response = requests.post(url,", "= base_url + 'author-request/list_followings' response = requests.post(url, json={}) followings = response.json() return followings", "timelines: posts = timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f')", "= base_url + 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json() return generated_posts", "json={}) followings = response.json() return followings # Client to get list of generated", "* base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers() followings =", "f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs # Parses followings def", "get list of followers def _get_followers(self): url = base_url + 'author-request/list_followers' response =", "of followings def _get_timelines(self): url = base_url + 'post-request/get_timelines' response = requests.post(url, json={})", 
"_add_posts(self, posts): if len(posts) > 0: ps = [] for post in posts:", "return self.get_outputs(followers, followings, timelines, generated_posts) # Client to get list of followers def", "get list of generated posts def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response", "Client to add follows data def _add_posts(self, posts): if len(posts) > 0: ps", "# Client to get list of followers def _get_followers(self): url = base_url +", "# Parses timelines def _parse_timelines(self, timelines): ts = [] for timeline in timelines:", "# Client to add follows data def _add_posts(self, posts): if len(posts) > 0:", "response = requests.post(url, json=req) # Parsing data for main program def get_outputs(self, followers,", "requests from mblogger.record_types import * base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers", "generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts)", "FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs # Parses followings def _parse_followings(self, followings):", "fls fs.append(f) return fs # Parses timelines def _parse_timelines(self, timelines): ts = []", "generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses followers def _parse_followers(self,", "= self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client to get list of", "'author-request/list_followings' response = requests.post(url, json={}) followings = response.json() return followings # Client to", "_add_generated_post(self, input_record): for ir in input_record: req = {} req['user_id'] = ir.user_id req['length']", "followings = response.json() return followings # Client to get list of generated posts", "base_url 
+ 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json() return generated_posts def", "for timeline in timelines: posts = timeline['posts'] for post in posts: post['timestamp'] =", "= fls fs.append(f) return fs # Parses followings def _parse_followings(self, followings): fs =", "to add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows =", "= [] for follower in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers", "return followings # Client to get list of followings def _get_timelines(self): url =", "ps.append(p) url = base_url + 'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json()) #", "= self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client to", "followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines,", "= FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs # Parses timelines def _parse_timelines(self,", "'author-request/list_followers' response = requests.post(url, json={}) followers = response.json() return followers # Client to", "ir.user_id req['length'] = ir.length url = base_url + 'post-request/generate_post' response = requests.post(url, json=req)", "follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows = [] for", "url = base_url + 'post-request/generate_post' response = requests.post(url, json=req) # Parsing data for", "= self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts", "followers # Client to get list of followings def 
_get_followings(self): url = base_url", "json=follows) # print(response.json()) # Client to add follows data def _add_posts(self, posts): if", "followers): fs = [] for follower in followers: fls = follower['followers'] f =", "generated_posts): ps = [] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f')", "requests.post(url, json={}) followers = response.json() return followers # Client to get list of", "follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data def", "posts t = Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated posts def _parse_generated_posts(self,", "return fs # Parses followings def _parse_followings(self, followings): fs = [] for following", "_parse_generated_posts(self, generated_posts): ps = [] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d", "= fls fs.append(f) return fs # Parses timelines def _parse_timelines(self, timelines): ts =", "'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines =", "for following in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls", "list of followings def _get_timelines(self): url = base_url + 'post-request/get_timelines' response = requests.post(url,", "input_record): for ir in input_record: req = {} req['user_id'] = ir.user_id req['length'] =", "= response.json() return generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts)", "str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json())", "response.json() return followers # Client to get list of 
followings def _get_followings(self): url", "timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts =", "base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers() followings = self._get_followings()", "followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines)", "self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data def _add_follow_requests(self, follow_requests): if", "timelines, generated_posts # Parses followers def _parse_followers(self, followers): fs = [] for follower", "self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests)", "self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses followers def _parse_followers(self, followers): fs", "= response.json() return followings # Client to get list of followings def _get_timelines(self):", "for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t", "fs.append(f) return fs # Parses timelines def _parse_timelines(self, timelines): ts = [] for", "follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response = requests.post(url, json=follows) # print(response.json())", "in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline)", "datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return ts #", "= 
Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated posts def _parse_generated_posts(self, generated_posts): ps", "= requests.post(url, json={}) followers = response.json() return followers # Client to get list", "ps = [] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p", "= post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response =", "ts # Parses generated posts def _parse_generated_posts(self, generated_posts): ps = [] for post", "= timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] =", "fs # Parses followings def _parse_followings(self, followings): fs = [] for following in", "p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response = requests.post(url, json=ps)", "posts def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts", "Client to add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows", "for follower in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls", "return followings # Client to get list of generated posts def _get_generated_posts(self): url", "Parses timelines def _parse_timelines(self, timelines): ts = [] for timeline in timelines: posts", "= [] for timeline in timelines: posts = timeline['posts'] for post in posts:", "self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client to get list of followers", "json=ps) # print(response.json()) # Client to add a generated post def _add_generated_post(self, input_record):", "# Client to get list of followings def _get_followings(self): url = base_url +", "_parse_followers(self, followers): fs = [] for 
follower in followers: fls = follower['followers'] f", "= requests.post(url, json={}) generated_posts = response.json() return generated_posts def add_data(self, followings, followers, follow_requests,", "# Parsing data for main program def get_outputs(self, followers, followings, timelines, generated_posts): followers", "generated posts def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response = requests.post(url, json={})", "= [] for post in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p)", "response = requests.post(url, json={}) followers = response.json() return followers # Client to get", "response = requests.post(url, json={}) followings = response.json() return followings # Client to get", "add follows data def _add_posts(self, posts): if len(posts) > 0: ps = []", "followings, timelines, generated_posts) # Client to get list of followers def _get_followers(self): url", "def _parse_timelines(self, timelines): ts = [] for timeline in timelines: posts = timeline['posts']", "generated posts def _parse_generated_posts(self, generated_posts): ps = [] for post in generated_posts: post['timestamp']", "response = requests.post(url, json=follows) # print(response.json()) # Client to add follows data def", "p = Post.from_dict(post) ps.append(p) return ps if __name__ == \"__main__\": app = App()", "def _get_timelines(self): url = base_url + 'post-request/get_timelines' response = requests.post(url, json={}) followings =", "followers def _get_followers(self): url = base_url + 'author-request/list_followers' response = requests.post(url, json={}) followers", "0: ps = [] for post in posts: p = post.to_dict() p['timestamp'] =", "in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return", "fs # Parses timelines def _parse_timelines(self, timelines): ts = [] for timeline in", "list of generated posts def _get_generated_posts(self): url = base_url + 
'post-request/get_generated_posts' response =", "posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t)", "get list of followings def _get_followings(self): url = base_url + 'author-request/list_followings' response =", "p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response", "fs = [] for following in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following)", "to get list of followings def _get_followings(self): url = base_url + 'author-request/list_followings' response", "json={}) followings = response.json() return followings # Client to get list of followings", "def _get_followings(self): url = base_url + 'author-request/list_followings' response = requests.post(url, json={}) followings =", "+ 'author-request/follows' response = requests.post(url, json=follows) # print(response.json()) # Client to add follows", "[] for follow in follow_requests: f = follow.to_dict() follows.append(f) url = base_url +", "base_url + 'author-request/list_followings' response = requests.post(url, json={}) followings = response.json() return followings #", "json=req) # Parsing data for main program def get_outputs(self, followers, followings, timelines, generated_posts):", "# print(response.json()) # Client to add follows data def _add_posts(self, posts): if len(posts)", "in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps", "for post in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url =", "Client to get list of generated posts def _get_generated_posts(self): url = base_url +", "{} req['user_id'] = ir.user_id req['length'] = ir.length url = base_url + 'post-request/generate_post' response", "= self._parse_timelines(timelines) generated_posts = 
self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses followers", "followers = response.json() return followers # Client to get list of followings def", "data def _add_posts(self, posts): if len(posts) > 0: ps = [] for post", "followings # Client to get list of followings def _get_timelines(self): url = base_url", "main program def get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings =", "Client to add a generated post def _add_generated_post(self, input_record): for ir in input_record:", "posts = timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts']", "Client to get list of followers def _get_followers(self): url = base_url + 'author-request/list_followers'", "def get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines", "= self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers,", "posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts'", "# Client to add a generated post def _add_generated_post(self, input_record): for ir in", "requests.post(url, json=follows) # print(response.json()) # Client to add follows data def _add_posts(self, posts):", "following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs # Parses timelines", "[] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post)", "post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if __name__", "fs = [] 
for follower in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower)", "followings): fs = [] for following in followings: fls = following['followings'] f =", "followers, followings, timelines, generated_posts # Parses followers def _parse_followers(self, followers): fs = []", "followings = self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines,", "+ 'author-request/list_followings' response = requests.post(url, json={}) followings = response.json() return followings # Client", "followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data", "for ir in input_record: req = {} req['user_id'] = ir.user_id req['length'] = ir.length", "%H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated", "requests.post(url, json=req) # Parsing data for main program def get_outputs(self, followers, followings, timelines,", "def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to", "def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows = [] for follow in", "ps = [] for post in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp)", "= requests.post(url, json={}) followings = response.json() return followings # Client to get list", "= [] for follow in follow_requests: f = follow.to_dict() follows.append(f) url = base_url", "a generated post def _add_generated_post(self, input_record): for ir in input_record: req = {}", "'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json() return followings # Client to", "t = Timeline.from_dict(timeline) ts.append(t) return 
ts # Parses generated posts def _parse_generated_posts(self, generated_posts):", "to get list of followers def _get_followers(self): url = base_url + 'author-request/list_followers' response", "f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs # Parses timelines def", "base_url + 'post-request/generate_post' response = requests.post(url, json=req) # Parsing data for main program", "requests.post(url, json={}) generated_posts = response.json() return generated_posts def add_data(self, followings, followers, follow_requests, posts,", "data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows = [] for follow", "App(): def evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts", "followings def _parse_followings(self, followings): fs = [] for following in followings: fls =", "to get list of followings def _get_timelines(self): url = base_url + 'post-request/get_timelines' response", "input_record: req = {} req['user_id'] = ir.user_id req['length'] = ir.length url = base_url", "self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses followers def", "in posts: p = post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url +", "followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return", "for follow in follow_requests: f = follow.to_dict() follows.append(f) url = base_url + 'author-request/follows'", "> 0: ps = [] for post in posts: p = post.to_dict() p['timestamp']", "[] for follower in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers =", "= base_url + 'author-request/follows' response = requests.post(url, 
json=follows) # print(response.json()) # Client to", "def _add_posts(self, posts): if len(posts) > 0: ps = [] for post in", "following in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f)", "post def _add_generated_post(self, input_record): for ir in input_record: req = {} req['user_id'] =", "timelines def _parse_timelines(self, timelines): ts = [] for timeline in timelines: posts =", "datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if __name__ == \"__main__\":", "+ 'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json()) # Client to add a", "= base_url + 'author-request/list_followers' response = requests.post(url, json={}) followers = response.json() return followers", "= follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs # Parses", "f.followings = fls fs.append(f) return fs # Parses timelines def _parse_timelines(self, timelines): ts", "# Client to get list of generated posts def _get_generated_posts(self): url = base_url", "self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client to get", "followings, timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts", "follower in followers: fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f)", "followers = self._get_followers() followings = self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return", "generated post def _add_generated_post(self, input_record): for ir in input_record: req = {} req['user_id']", "req['user_id'] = ir.user_id req['length'] = ir.length url = base_url + 
'post-request/generate_post' response =", "fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs #", "f = follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response = requests.post(url, json=follows)", "return generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) #", "requests.post(url, json=ps) # print(response.json()) # Client to add a generated post def _add_generated_post(self,", "Parses followings def _parse_followings(self, followings): fs = [] for following in followings: fls", "get list of followings def _get_timelines(self): url = base_url + 'post-request/get_timelines' response =", "+ 'author-request/list_followers' response = requests.post(url, json={}) followers = response.json() return followers # Client", "to get list of generated posts def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts'", "'%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return ts # Parses", "= datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if __name__ ==", "= 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines", "if len(follow_requests) > 0: follows = [] for follow in follow_requests: f =", "timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client", "to add a generated post def _add_generated_post(self, input_record): for ir in input_record: req", "'post-request/create_posts' response = requests.post(url, json=ps) # print(response.json()) # Client to add a generated", "_get_followers(self): url = base_url + 
'author-request/list_followers' response = requests.post(url, json={}) followers = response.json()", "requests.post(url, json={}) followings = response.json() return followings # Client to get list of", "posts def _parse_generated_posts(self, generated_posts): ps = [] for post in generated_posts: post['timestamp'] =", "response = requests.post(url, json=ps) # print(response.json()) # Client to add a generated post", "posts): if len(posts) > 0: ps = [] for post in posts: p", "Client to get list of followings def _get_followings(self): url = base_url + 'author-request/list_followings'", "'post-request/generate_post' response = requests.post(url, json=req) # Parsing data for main program def get_outputs(self,", "generated_posts) # Client to get list of followers def _get_followers(self): url = base_url", "# Parses followings def _parse_followings(self, followings): fs = [] for following in followings:", "'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json() return generated_posts def add_data(self, followings,", "add follows data def _add_follow_requests(self, follow_requests): if len(follow_requests) > 0: follows = []", "= self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses followers def _parse_followers(self, followers):", "list of followers def _get_followers(self): url = base_url + 'author-request/list_followers' response = requests.post(url,", "list of followings def _get_followings(self): url = base_url + 'author-request/list_followings' response = requests.post(url,", "followings, timelines, generated_posts # Parses followers def _parse_followers(self, followers): fs = [] for", "timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts # Parses", "Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated posts def 
_parse_generated_posts(self, generated_posts): ps =", "add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add", "json={}) followers = response.json() return followers # Client to get list of followings", "= base_url + 'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json() return followings", "program def get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings)", "Parsing data for main program def get_outputs(self, followers, followings, timelines, generated_posts): followers =", "post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t =", "= {} req['user_id'] = ir.user_id req['length'] = ir.length url = base_url + 'post-request/generate_post'", "req = {} req['user_id'] = ir.user_id req['length'] = ir.length url = base_url +", "post.to_dict() p['timestamp'] = str(post.timestamp) ps.append(p) url = base_url + 'post-request/create_posts' response = requests.post(url,", "follow_requests): if len(follow_requests) > 0: follows = [] for follow in follow_requests: f", "= posts t = Timeline.from_dict(timeline) ts.append(t) return ts # Parses generated posts def", "self.get_outputs(followers, followings, timelines, generated_posts) # Client to get list of followers def _get_followers(self):", "followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows", "print(response.json()) # Client to add a generated post def _add_generated_post(self, input_record): for ir", "followings def _get_timelines(self): url = base_url + 'post-request/get_timelines' response = requests.post(url, json={}) 
followings", "follows.append(f) url = base_url + 'author-request/follows' response = requests.post(url, json=follows) # print(response.json()) #", "for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p)", "_get_timelines(self): url = base_url + 'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json()", "fs.append(f) return fs # Parses followings def _parse_followings(self, followings): fs = [] for", "follows data def _add_posts(self, posts): if len(posts) > 0: ps = [] for", "return fs # Parses timelines def _parse_timelines(self, timelines): ts = [] for timeline", "FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs # Parses timelines def _parse_timelines(self, timelines):", "generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts) # Client to get list", "= self._parse_followers(followers) followings = self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers,", "= following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs # Parses", "def _parse_generated_posts(self, generated_posts): ps = [] for post in generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'],", "for main program def get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings", "timeline in timelines: posts = timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'],", "def _parse_followers(self, followers): fs = [] for follower in followers: fls = follower['followers']", "to add follows data def _add_posts(self, posts): if len(posts) > 0: ps =", "= base_url + 'post-request/create_posts' response = 
requests.post(url, json=ps) # print(response.json()) # Client to", "generated_posts = response.json() return generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests)", "= requests.post(url, json=follows) # print(response.json()) # Client to add follows data def _add_posts(self,", "generated_posts def add_data(self, followings, followers, follow_requests, posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client", "return followers # Client to get list of followings def _get_followings(self): url =", "return followers, followings, timelines, generated_posts # Parses followers def _parse_followers(self, followers): fs =", "post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return", "followings = response.json() return followings # Client to get list of followings def", "'author-request/follows' response = requests.post(url, json=follows) # print(response.json()) # Client to add follows data", "+ 'post-request/generate_post' response = requests.post(url, json=req) # Parsing data for main program def", "of followings def _get_followings(self): url = base_url + 'author-request/list_followings' response = requests.post(url, json={})", "def _parse_followings(self, followings): fs = [] for following in followings: fls = following['followings']", "mblogger.record_types import * base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self): followers = self._get_followers()", "= response.json() return followings # Client to get list of generated posts def", "response.json() return followings # Client to get list of generated posts def _get_generated_posts(self):", "ir.length url = base_url + 'post-request/generate_post' response = requests.post(url, json=req) # Parsing data", "follower['followers'] f = 
FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs # Parses followings", "= requests.post(url, json=req) # Parsing data for main program def get_outputs(self, followers, followings,", "followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings = fls fs.append(f) return fs", "= ir.length url = base_url + 'post-request/generate_post' response = requests.post(url, json=req) # Parsing", "generated_posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') p = Post.from_dict(post) ps.append(p) return ps if", "of generated posts def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response = requests.post(url,", "posts, input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data def _add_follow_requests(self,", "base_url + 'author-request/list_followers' response = requests.post(url, json={}) followers = response.json() return followers #", "= follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response = requests.post(url, json=follows) #", "follow in follow_requests: f = follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response", "def _get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts =", "in follow_requests: f = follow.to_dict() follows.append(f) url = base_url + 'author-request/follows' response =", "def _add_generated_post(self, input_record): for ir in input_record: req = {} req['user_id'] = ir.user_id", "# Parses generated posts def _parse_generated_posts(self, generated_posts): ps = [] for post in", "[] for following in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings =", "base_url + 'author-request/follows' response = requests.post(url, json=follows) # 
print(response.json()) # Client to add", "base_url + 'post-request/get_timelines' response = requests.post(url, json={}) followings = response.json() return followings #", "req['length'] = ir.length url = base_url + 'post-request/generate_post' response = requests.post(url, json=req) #", "timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts", "self._parse_followings(followings) timelines = self._parse_timelines(timelines) generated_posts = self._parse_generated_posts(generated_posts) return followers, followings, timelines, generated_posts #", "_get_followings(self): url = base_url + 'author-request/list_followings' response = requests.post(url, json={}) followings = response.json()", "followings # Client to get list of generated posts def _get_generated_posts(self): url =", "response.json() return followings # Client to get list of followings def _get_timelines(self): url", "_parse_followings(self, followings): fs = [] for following in followings: fls = following['followings'] f", "Client to get list of followings def _get_timelines(self): url = base_url + 'post-request/get_timelines'", "= base_url + 'post-request/generate_post' response = requests.post(url, json=req) # Parsing data for main", "url = base_url + 'author-request/list_followings' response = requests.post(url, json={}) followings = response.json() return", "f.followers = fls fs.append(f) return fs # Parses followings def _parse_followings(self, followings): fs", "fls fs.append(f) return fs # Parses followings def _parse_followings(self, followings): fs = []", "class App(): def evaluate(self): followers = self._get_followers() followings = self._get_followings() timelines = self._get_timelines()", "input_record): self._add_follow_requests(follow_requests) self._add_posts(posts) self._add_generated_post(input_record) # Client to add follows data def _add_follow_requests(self, follow_requests):", "followers: fls = 
follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs", "= datetime.strptime(post['timestamp'], '%Y-%m-%d %H:%M:%S.%f') timeline['posts'] = posts t = Timeline.from_dict(timeline) ts.append(t) return ts", "of followers def _get_followers(self): url = base_url + 'author-request/list_followers' response = requests.post(url, json={})", "= self._get_followings() timelines = self._get_timelines() generated_posts = self._get_generated_posts() return self.get_outputs(followers, followings, timelines, generated_posts)", "return ts # Parses generated posts def _parse_generated_posts(self, generated_posts): ps = [] for", "_get_generated_posts(self): url = base_url + 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json()", "# Client to get list of followings def _get_timelines(self): url = base_url +", "get_outputs(self, followers, followings, timelines, generated_posts): followers = self._parse_followers(followers) followings = self._parse_followings(followings) timelines =", "= [] for following in followings: fls = following['followings'] f = FollowingsRecord.from_dict(following) f.followings", "in timelines: posts = timeline['posts'] for post in posts: post['timestamp'] = datetime.strptime(post['timestamp'], '%Y-%m-%d", "print(response.json()) # Client to add follows data def _add_posts(self, posts): if len(posts) >", "import requests from mblogger.record_types import * base_url = 'http://127.0.0.1:5000/' class App(): def evaluate(self):", "fls = follower['followers'] f = FollowersRecord.from_dict(follower) f.followers = fls fs.append(f) return fs #", "ir in input_record: req = {} req['user_id'] = ir.user_id req['length'] = ir.length url", "url = base_url + 'post-request/get_generated_posts' response = requests.post(url, json={}) generated_posts = response.json() return" ]
[ "G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one", "\"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F", "column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted", "if month < 10: month = \"0\" + str(month) if day < 10:", "= (curr_time - pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap != 0", "& (self.df['運転モード9'] == 0) ] for month in range(1,13): for day in range(1,32):", "事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南", "\"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F", "{} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス self.filename = filepath", "range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if", "データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list = [] for i in range(column-2):", "# 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南", "PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\",", "== 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1)", "\"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F", "# 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time", "# 終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df = 
pd.concat( #", "事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南", "1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap", "0 or time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index", "PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\",", "x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for key,value in data.items(): if key", "# df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')]", "\"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\",", "\"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\",", "self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) |", "# 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self):", "1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] #", "< 10: day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name)", "PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 #", "(self.df['運転モード4'] == 0) & (self.df['運転モード5'] 
== 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7']", "in data.items(): if key == \"init_bems\" and index == False: result = value", "PACG_') or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] ==", "edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) #", "3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8:", "== day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data =", "\"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F", "== 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] ==", "| (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: #", "PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\",", "PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\",", "pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap != 0 or time_gap !=", "== 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] ==", "事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ", "\"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F", "kind def edit_columns(self,column,start): rem_list = [] for i in range(column-2): rem_list.append(i) for j", 
"one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data =", "(self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6']", "print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in range(1,len(df)): if", "= time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df", "return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間'", "8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 #", "PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\",", "G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード']", "PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\",", "# time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index", "range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day ==", "= columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for", "pd import os class DataFile: # 1日分のデータが格納された辞書 
data_files = {} def __init__(self,df,filepath,outpath,kind): #", "\"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\",", "def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス self.filename = filepath #", "開始時間 end_time = df.index[-1] # 終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得", "PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\",", "self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data", "# 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')]", "not in column and 'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館", ") print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出", "if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード']", "if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not", "2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or (one == 'C5F", "== month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder", "df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0]", 
"print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for", "10: month = \"0\" + str(month) if day < 10: day = \"0\"", "'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list", "PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\",", "& (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき", "in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input", "\"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\",", "self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3)", "'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def", "df # 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir = outpath # データの種類", "\"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\")", "事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御", "PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 
PAC_風速\":\"風速9\" }, inplace=True) self.df", "含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル", "PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\",", "# df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list =", "時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time = df.index[-1] # 終了時間 day_gap =", "= self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] #", "not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ", "== 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] ==", "df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init,", "\"init_bems\" and index == False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1]", "事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中", "& (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ]", "事務室東南 PAC_' not in column and 'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data)", "\"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ 
PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F", "(self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5']", "(end_time - start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ]", "def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for", "- pre_time).seconds if date_gap != 0 or time_gap != 60: init_bems_list_time.append(i) pre_time =", "conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for key,value", "(self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data", "\"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F", "= df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time", "# 1ファイルの内容 self.df = df # 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir", "# 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df", "0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ] for month in", "df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0)", "one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = 
one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval =", "output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month = \"0\" + str(month)", "信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time =", "self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) &", "df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = {", "& (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day)", "出力先パス self.output_dir = outpath # データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list =", "# 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df #", "\"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data", "if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data = { 'measure':one_day_df } self.create_conversion_file(output_path_folder,one_day_df,True)", "for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif", "time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array", "(curr_time - pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap != 0 or", "and 'B館 RF 外気温度' not in column: 
self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F", "self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month", "事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南", "\"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\",", "事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南", "= list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output))", "def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F", "運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] ==", "\"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\",", "'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')]", "\"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output destination", "df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = 
df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return", "10: day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try:", "= df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ", "start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time)", "PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\",", "事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南", "df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time = df.index[-1]", "PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\",", "連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval }", "事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in", "air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_']", "運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード']", "self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] 
==", "= df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time = df.index[-1] #", "= list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for key,value in data.items():", "print(\"-------------------------------------------------------\") if month < 10: month = \"0\" + str(month) if day <", "column and 'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\",", "column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column and 'B館 RF", "PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\",", "# 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F", "self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init =", "# 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval", "事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column", "self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\",", "[] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day in range(1,32):", "df_bems_init 
= one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')]", "事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード']", "& ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 #", "df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\")", "self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] =", "\"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\",", "day in range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)] #", "df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df", "- start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] )", "len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init =", "== 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ] for month", "= self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ 
(self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0)", "(self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3']", "create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in range(1,len(df)): if i ==", "= conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df):", "month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder =", "df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True)", "PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\",", "\"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\",", "== 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or", "\"0\" + str(month) if day < 10: day = \"0\" + str(day) folder_name", "else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] ==", "day < 10: day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output =", "# 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data", "PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 
事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\",", "df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1)", "\"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index", "range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name):", "= [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0) &", "output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return", "PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\",", "list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing", "5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) &", "事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中", "self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = []", "columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") 
print(\"-------------------------------------------------------\") df_bems_control_list = [] for month", "含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data = { 'measure':one_day_df }", "\"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\",", "df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')]", "= (end_time - start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True)", "事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ", "df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')]", "0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0)", "data.items(): if key == \"init_bems\" and index == False: result = value result['時間']", "print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month =", "\"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F", "制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = 
df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] #", "== 0) & (self.df['運転モード9'] == 0) ] for month in range(1,13): for day", "formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13): for day", "formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init,", "result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w')", "output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index =", "str(month) if day < 10: day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day)", "PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\",", "& (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ] for month in range(1,13):", "= df # 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir = outpath #", "= [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day in", "df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df =", "5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = 
df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df =", "'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ", "self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\",", "事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\",", "and index == False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] =", "df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval }", "'時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義", "評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self):", "= \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except FileExistsError:", "\"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\",", "\"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F", "\"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\",", "\"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 
PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F", "= value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result =", "= self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day in range(1,32): one_day_df =", "'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data()", "df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False)", "import pandas as pd import os class DataFile: # 1日分のデータが格納された辞書 data_files = {}", "# 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] #", "= pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def", "# 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1)", "= [] for i in range(1,len(df)): if i == 1: pre_time = df.index[0]", "- pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap != 0 or time_gap", "\"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\",", "data_files = {} def 
__init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス self.filename", "else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self):", "output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\")", "インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column:", "(one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] +=", "} self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list =", "= df.index[-1] # 終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df =", "!= 0 or time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index()", "elif 'C5F 事務室東南 PAC_' not in column and 'B館 RF 外気温度' not in", "\"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F", "in range(1,len(df)): if i == 1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0)", "df.index.name = '時間' return df print(output) for key,value in data.items(): if key ==", "\"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\",", "\"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 
PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F", "PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] =", "[] for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month ==", "self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南", "事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南", "columns_list = [] for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list", "\"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" },", "result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def", "str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output)", "except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda", "2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F", "# 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def", "PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", 
\"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\",", "df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ", "== 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1", "one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル']", "事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中", "事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード']", "\"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F", "& (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) &", "1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス", "i in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns", "in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\",", "PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\",", "事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南", "= df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems 
self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F", "folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except", "key == \"init_bems\" and index == False: result = value result['時間'] = list(map(lambda", "df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time - pre_time).days time_gap =", "init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\"", "((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2)", "edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df =", "2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3", "df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for key,value in", "\"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F", "= 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館", "& (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0) &", "print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name =", "# インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in", "0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0)", "date_gap != 0 or time_gap 
!= 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index =", "= ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] #", "# 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time = df.index[-1] # 終了時間 day_gap", "1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1)", "== False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index)", "= one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) #", "remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column in self.df.columns:", "\"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder'", "print(output) for key,value in data.items(): if key == \"init_bems\" and index == False:", "value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value)", "= df.index[i] date_gap = (curr_time - pre_time).days time_gap = (curr_time - pre_time).seconds if", "df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list = []", "PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\",", "}, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", 
\"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\",", "PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one", "\"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F", "= df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index)", "self.convesion_airconditioning_data() for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month ==", "# time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index =", "formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i", "self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\",", "\"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\",", "\"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\",", "\"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except FileExistsError: pass", "for key,value in data.items(): if key == \"init_bems\" and index == False: result", "((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード']", "事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0", "convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: 
self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data)", "= \"0\" + str(month) if day < 10: day = \"0\" + str(day)", "__init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス self.filename = filepath # 出力先パス", "= edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間'])", "2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or (one", "> 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')]", "G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month", "== 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4", "outpath # データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list = [] for i", "df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] #", "事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True)", "運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] ==", "新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time =", "PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\",", "| (self.df['C館 5F 
G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 #", "事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南", "self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2", "& (self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) &", "self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column and 'B館 RF 外気温度' not", "= self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name:", "rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df", "class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for", "# 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) &", "in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column and 'B館", "G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館", "\"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\",", "FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str)))", "# 初期ファイル 
df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = {", "PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def", "PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\",", "= 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館", "print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems =", "入力ファイルパス self.filename = filepath # 出力先パス self.output_dir = outpath # データの種類 self.data_kind =", "事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ", "df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index =", "PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\",", "self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) &", "= filepath # 出力先パス self.output_dir = outpath # データの種類 self.data_kind = kind def", "df print(output) for key,value in data.items(): if key == \"init_bems\" and index ==", "= df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] #", "pandas as pd import os class DataFile: # 1日分のデータが格納された辞書 data_files = {} def", "curr_time = df.index[i] date_gap = (curr_time - pre_time).days time_gap = (curr_time - 
pre_time).seconds", "self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] =", "df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df =", "in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def", "df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def", "\"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ])", "formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in range(1,len(df)): if i == 1: pre_time", "RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\",", "事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南", "# データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list = [] for i in", "(curr_time - pre_time).seconds if date_gap != 0 or time_gap != 60: init_bems_list_time.append(i) pre_time", "1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F", "事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ", "\"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F", "\"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 
PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F", "\"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\",", "= \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) #", "0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0)", "\"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F", "事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ", "= 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] ==", "事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ", "range(1,len(df)): if i == 1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else:", "create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for", "'C5F 事務室東南 PAC_' not in column and 'B館 RF 外気温度' not in column:", "1ファイルの内容 self.df = df # 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir =", "'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) &", "= \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created", "air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2)", "& (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中", "= df.index[0] # 開始時間 
end_time = df.index[-1] # 終了時間 day_gap = (end_time -", "column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F", "time_array.append(\"EOF\") # df_re_index = df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list", "= [] for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month", "\"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F", "PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\",", "data output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month = \"0\" +", "(self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3)", "print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13): for day in range(1,32): one_day_df", "\"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\",", "== 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] ==", "self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day in range(1,32): one_day_df", "df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area =", "PAC_' not in column and 'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) 
self.df.rename(columns={", "pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name", "\"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F", "0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0)", "\"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F", "\"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a", "one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self):", "edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) #", "pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間", "range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values", "] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] #", "time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df =", "data...\") 
print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in range(1,len(df)):", "for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month == month)", "control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南", "for day in range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)]", "self.filename = filepath # 出力先パス self.output_dir = outpath # データの種類 self.data_kind = kind", "= self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init", "result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input", "list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output) for key,value in data.items(): if", "'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self):", "= self.df[(self.df.index.month == month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) >", "for i in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list])", "print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") 
print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = []", "for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df =", "i == 1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time =", "df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] ==", "= one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval", "\"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output", "self.df = self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] ==", "df_bems_control_list = [] for month in range(1,13): for day in range(1,32): one_day_df =", "\"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F", "+ str(month) if day < 10: day = \"0\" + str(day) folder_name =", "8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] =", "j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0])", "0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0)", "in column and 'B館 RF 外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF", "事務室南 
PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南", "in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns =", "\"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\",", "def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義", "try: os.makedirs(output) print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def", "folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df):", "in range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理", "\"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\")", "運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'):", "!= 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1]", "else: curr_time = df.index[i] date_gap = (curr_time - pre_time).days time_gap = (curr_time -", "事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[", "def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] 
for month in", "date_gap = (curr_time - pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap !=", "(one == 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側", "conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df", "\"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\")", "PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\",", "conversion_columns_name(self): columns_list = [] for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns =", "# 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data = { 'measure':one_day_df", "os.makedirs(output) print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index):", "self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column and 'B館 RF 外気温度'", "\"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F", "\"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\",", "PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ 
PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\",", "= self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column in self.df.columns: columns_list.append('温度取り_' +", "# 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time", "df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F", "# 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル']", "df.index[0] # 開始時間 end_time = df.index[-1] # 終了時間 day_gap = (end_time - start_time).days", "== 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] ==", "(self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_')", "開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df =", "== 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one ==", "# 開始時間 end_time = df.index[-1] # 終了時間 day_gap = (end_time - start_time).days #", "one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する", "\"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df =", "class DataFile: # 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df =", "conversion_index(value) 
result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time", "print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13): for", "(self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems():", "\"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\",", "= df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称')", "a data output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month = \"0\"", "\"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F", "if key == \"init_bems\" and index == False: result = value result['時間'] =", "\"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F", "\"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F", "\"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\",", "[] for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns 
= columns_list def create_conversion_data(self):", "\"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def", "df.index[i] date_gap = (curr_time - pre_time).days time_gap = (curr_time - pre_time).seconds if date_gap", "5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード'] = 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if", "== day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control =", "\"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F", "one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile):", "== 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F", "select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems", "# 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル']", "(self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1)", "= kind def edit_columns(self,column,start): rem_list = [] for i in range(column-2): rem_list.append(i) for", "# 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) 
print(df.index,start_time,end_time) self.df", "self.df[(self.df.index.month == month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1:", "0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0)", "'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 #", "事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ", "最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F", "+ str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' +", "& ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3) |", "[] for i in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df =", "事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南", "self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ", "edit_columns(self,column,start): rem_list = [] for i in range(column-2): rem_list.append(i) for j in range(i+2,start-2):", "= self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0)", "def conversion_columns_name(self): columns_list = [] for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns", "\"設定温度6\", \"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", 
\"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\",", "# 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns)", "in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] ==", "in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month == month) & (self.df.index.day", "評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list =", "PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\"", "\"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\",", "(self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合", "self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1'] == 0) & (self.df['運転モード2']", "self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column in self.df.columns: columns_list.append('温度取り_'", "time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time]", "} self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] 
self.convesion_airconditioning_data() for month", "self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column in self.df.columns: columns_list.append('温度取り_' + column)", "= one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class", "df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間']", "\"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output", "df.index[-1] # 終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df = pd.concat(", "外気温度' not in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F", "== 2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] =", "MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column", "# 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list", "filepath # 出力先パス self.output_dir = outpath # データの種類 self.data_kind = kind def edit_columns(self,column,start):", "= '時間' return df print(output) for key,value in data.items(): if key == \"init_bems\"", "print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F = 
self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems", "self.data_kind = kind def edit_columns(self,column,start): rem_list = [] for i in range(column-2): rem_list.append(i)", "+ output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def conversion_index(df): df.index", "= formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control,", "in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南", "rem_list.append(j) edit_df = self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df", "self.output_dir = outpath # データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list = []", "PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\",", "month = \"0\" + str(month) if day < 10: day = \"0\" +", "= df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time - pre_time).days time_gap", "for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing", "PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\",", "import os class DataFile: # 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容", "全てのカラムに含まれる接頭辞 for one in air_con_area: 
self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館", "as pd import os class DataFile: # 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind):", "0) ] for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month", "self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_'", "result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def", "if day < 10: day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output", "curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array =", "= df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval", "== 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1)", "\"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F", "def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13):", "== 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month ==", "# 
5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self):", "= [] for i in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j) edit_df", "3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) &", "事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0)", "{ 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df =", "3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if self.df.index[0].month == 8: # 8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F", "result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time =", "init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time - pre_time).days time_gap = (curr_time", "\"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F", "初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data =", "# 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) & (self.df[one+'運転モード'] == 2)),one+'運転モード']", "PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 
運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1)", "curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time - pre_time).days", "day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義 [", "冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for", "\"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F", "PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\",", "df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap = (curr_time -", "if date_gap != 0 or time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index", "or time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index =", "(self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ] for", "5F G50_省エネレベル'] == 2) | (self.df['C館 5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] ==", "& (self.df[one+'運転モード'] == 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in", "def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = [] for column in", "= [] for column in self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list def", "'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in", "(self.df['運転モード9'] == 0) ] for month in range(1,13): for day in range(1,32): one_day_df", "= outpath # データの種類 
self.data_kind = kind def edit_columns(self,column,start): rem_list = [] for", "PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\",", "df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems", "事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\",", "self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems =", "事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中", "False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else:", "事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ", "data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13): for day in range(1,32):", "= self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間')", "= (curr_time - pre_time).seconds if date_gap != 0 or time_gap != 60: init_bems_list_time.append(i)", "= df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')]", 
"self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in", "column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list =", "事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ", "\"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\",", "\"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating", "事務室中 PACG_運転モード_2\":\"運転モード3\", \"C5F 事務室中 PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ", "return df print(output) for key,value in data.items(): if key == \"init_bems\" and index", "[ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F =", "[] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0'] == 0) & (self.df['運転モード1']", "事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南", "外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\",", "one_day_df = 
self.df[(self.df.index.month == month) & (self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df)", "(self.df.index.day == day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control", "def edit_columns(self,column,start): rem_list = [] for i in range(column-2): rem_list.append(i) for j in", "input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13): for day in", "output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル", "== 2),one+'吸込温度'] += 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if", "PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\",", "# 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 1) &", "init_bems_list_time = [] for i in range(1,len(df)): if i == 1: pre_time =", "== \"init_bems\" and index == False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str)))", "事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for", "= edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換", "[] for i in range(1,len(df)): if i == 1: pre_time = df.index[0] curr_time", "事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\", \"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ", "PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 
PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\", \"C5F 事務室東南 PAC_運転モード\":\"運転モード9\",", "df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))]", "PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\",", "['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞", "事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中", "= pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] #", "& (self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) &", "df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init)", "print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return output def create_conversion_file(self,output,data,index): def", "\"C5F 事務室南ペリ PACG_吸込温度_2\":\"吸込温度5\", \"C5F 事務室南ペリ PACG_設定温度_2\":\"設定温度5\", \"C5F 事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ PACG_風速_2\":\"風速5\", \"C5F", "1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: # 8月以外のとき self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル']", "def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in 
range(1,len(df)): if i", "\"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\",", "# 制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval", "PACG_','C5F 事務室南ペリ PACG_','C5F 事務室南 PACG_','C5F 事務室東南 PAC_'] # 全てのカラムに含まれる接頭辞 for one in air_con_area:", "& (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0) &", "(self.df['運転モード8'] == 0) & (self.df['運転モード9'] == 0) ] for month in range(1,13): for", "result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result", "0) & (self.df['運転モード9'] == 0) ] for month in range(1,13): for day in", "if (one == 'C5F 事務室中 PACG_') or (one == 'C5F 事務室南 PACG_'): #", "PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\",", "day)] # 含まれている時間がある時だけ処理 if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')]", "pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i] date_gap =", "= {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df # 入力ファイルパス self.filename =", "self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間'] = pd.to_datetime(df['時間']) # 新しく時間列として定義 df = df.set_index('時間') #", "事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中", "edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = 
self.df.rename(columns={time_name: '時間'}) # 信号名称カラムが時間なので名前を変換 df['時間']", "\"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F", "self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中 PACG_','C5F", "= one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def", "事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南", "def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10:", "month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month == month) &", "x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted", "key,value in data.items(): if key == \"init_bems\" and index == False: result =", "pre_time = curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" #", "5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area", "= 2 # 運転状態が1で省エネレベルが1で運転モードが2のとき暖房(2) if (one == 'C5F 事務室中 PACG_') or (one ==", "result_data = { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 
'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = []", "os class DataFile: # 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df", "def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in range(1,len(df)): if i == 1:", "for one in air_con_area: self.df.loc[self.df[one+'運転']==0,one+'運転モード'] = 0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F", "== 0) & (self.df['運転モード4'] == 0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6'] ==", "+ column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list", "i in range(1,len(df)): if i == 1: pre_time = df.index[0] curr_time = df.index[1]", "事務室中 PACG_運転モード\":\"運転モード2\", \"C5F 事務室中 PACG_風速\":\"風速2\", \"C5F 事務室中 PACG_吸込温度_2\":\"吸込温度3\", \"C5F 事務室中 PACG_設定温度_2\":\"設定温度3\", \"C5F 事務室中", "= one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data", "= curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array", "\"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\", \"風速9\", \"外気温\", ]) def create_result_folder(self,month,day):", "self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all') def conversion_columns_name(self): columns_list = []", "= \"EOF\" result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w',index=index) else: result = conversion_index(value) 
result.to_csv(output+\"\\\\{}.csv\".format(key),encoding='shift-jis',mode='w') print(\"作成フォルダ:{}\\nBEMSデータ整形完了しました\".format(output)) print(\"Outputing formatted input data...\")", "\"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F", "df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") #", "create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\") df_bems_control_list = [] for month in range(1,13):", "\"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F", "result_data = { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df", "= df_re_index.drop('時間',axis=1) # df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df", "def select_input_data(self,floor): print(self.df.columns) df_C5F = self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出", "= df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] = \"EOF\" # time_array = df_re_index['時間'] # time_array.append(\"EOF\") # df_re_index", "== 0) ] for month in range(1,13): for day in range(1,32): one_day_df =", "事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南", "day = \"0\" + str(day) folder_name = \"{0}-{1}\".format(month,day) output = \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: 
os.makedirs(output)", "\"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\",", "(self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8'] == 0) & (self.df['運転モード9']", "事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ PACG_風速_2\":\"風速1\", \"C5F 事務室中 PACG_吸込温度\":\"吸込温度2\", \"C5F 事務室中 PACG_設定温度\":\"設定温度2\", \"C5F 事務室中", "4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_'", "# 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir = outpath # データの種類 self.data_kind", "index == False: result = value result['時間'] = list(map(lambda x:x[11:16],result['時間'].astype(str))) result.iloc[-1] = \"EOF\"", "PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F 事務室東南 PAC_吸込温度\":\"吸込温度9\", \"C5F 事務室東南 PAC_設定温度\":\"設定温度9\",", "df.set_index('時間') # 時間列をインデックスとして定義 start_time = df.index[0] # 開始時間 end_time = df.index[-1] # 終了時間", "抽出した中でもさらに絞り込み self.df = df_bems self.conversion_time_column('信号名称') def control_mode_edit(self): air_con_area = ['C5F 事務室中ペリ PACG_','C5F 事務室中", "pd.concat( # 最終整形データの定義 [ df.loc[str(start_time.year)+\"-\"+str(start_time.month)+\"-\"+str(start_time.day):str(end_time.year)+\"-\"+str(end_time.month)+\"-\"+str(end_time.day)].between_time('0:00','23:59',include_end=True) ] ) print(df.index,start_time,end_time) self.df = df def select_input_data(self,floor):", "\"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ PACG_風速\":\"風速0\", \"C5F", "df_re_index.index = time_array print(df_re_index) return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data()", "] for month in range(1,13): for day in range(1,32): one_day_df = self.df[(self.df.index.month ==", "day)] # 含まれている時間がある時だけ処理 if 
len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) result_data = {", "one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル", "return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[ (self.df['運転モード0']", "(self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] == 0) & (self.df['運転モード8']", "self.df.columns: columns_list.append('温度取り_' + column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\")", "self.df.drop(self.df.index[rem_list]) edit_df.columns = edit_df.iloc[0].values self.df = edit_df.drop(edit_df.index[0]) def conversion_time_column(self,time_name): df = self.df.rename(columns={time_name: '時間'})", "start_time = df.index[0] # 開始時間 end_time = df.index[-1] # 終了時間 day_gap = (end_time", "# 出力先パス self.output_dir = outpath # データの種類 self.data_kind = kind def edit_columns(self,column,start): rem_list", "制御ファイル df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval =", "print(df_re_index) return df_re_index df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() self.df = self.df[", "== 0) & (self.df['運転モード5'] == 0) & (self.df['運転モード6'] == 0) & (self.df['運転モード7'] ==", "終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df = pd.concat( # 最終整形データの定義", "for i in range(1,len(df)): if i == 1: pre_time = df.index[0] curr_time =", "PAC_運転モード\":\"運転モード9\", \"C5F 事務室東南 PAC_風速\":\"風速9\" }, inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", 
\"風速0\",", "= \"{0}\\\\{1}\".format(self.output_dir,folder_name) try: os.makedirs(output) print('Created folder' + output) except FileExistsError: pass print(\"-------------------------------------------------------\") return", "destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month = \"0\" + str(month) if", "df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for month in range(1,13): for day", "= one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = formatted_no_operation_init_bems(df_bems_init) # 連続しない時間帯は初期値を先頭に設定する df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] #", "def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南 PACG_' in column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data)", "create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if month < 10: month", "初期ファイル df_bems_init = df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル result_data = { 'control':df_bems_control,", "column: self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.rename(columns={ \"B館 RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F", "directory.........\") print(\"-------------------------------------------------------\") if month < 10: month = \"0\" + str(month) if day", "rem_list = [] for i in range(column-2): rem_list.append(i) for j in range(i+2,start-2): rem_list.append(j)", "df_bems_init = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度|外気温')] # 初期ファイル df_bems_init = 
df_bems_init[df_bems_init.index.astype(str).str.contains(':00:00|:30:00')] df_bems_eval = one_day_df.loc[:,one_day_df.columns.str.contains('吸込温度')] # 評価用ファイル", "RF 外気温度\":\"外気温\", \"C5F 事務室中ペリ PACG_吸込温度\":\"吸込温度0\", \"C5F 事務室中ペリ PACG_設定温度\":\"設定温度0\", \"C5F 事務室中ペリ PACG_運転モード\":\"運転モード0\", \"C5F 事務室中ペリ", "= self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\",", "PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\",", "= { 'no_operation':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df", "time_gap = (curr_time - pre_time).seconds if date_gap != 0 or time_gap != 60:", "\"風速9\", \"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if", "\"運転モード6\", \"風速6\", \"吸込温度7\", \"設定温度7\", \"運転モード7\", \"風速7\", \"吸込温度8\", \"設定温度8\", \"運転モード8\", \"風速8\", \"吸込温度9\", \"設定温度9\", \"運転モード9\",", "60: init_bems_list_time.append(i) pre_time = curr_time df_re_index = df.reset_index() df_re_index = df_re_index.loc[init_bems_list_time] df_re_index.loc[-1] =", "\"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F 事務室南 PACG_設定温度_3\":\"設定温度8\", \"C5F 事務室南 PACG_運転モード_3\":\"運転モード8\", \"C5F 事務室南 PACG_風速_3\":\"風速8\", \"C5F", "month < 10: month = \"0\" + str(month) if day < 10: day", "(self.df['運転モード1'] == 0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4']", "'時間' return df print(output) for key,value in data.items(): if key == \"init_bems\" and", "5F G50_省エネレベル'] == 3) | (self.df[one+'運転モード'] == 3)),one+'運転モード'] = 3 # 運転状態が1で省エネレベルが2,3または運転モードが3なら送風(3) if", "事務室南ペリ PACG_運転モード_2\":\"運転モード5\", \"C5F 事務室南ペリ 
PACG_風速_2\":\"風速5\", \"C5F 事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南", "end_time = df.index[-1] # 終了時間 day_gap = (end_time - start_time).days # 開始~終了までの日数を取得 df", "\"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\",", "def create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df", "8月の場合 self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else:", "pre_time).seconds if date_gap != 0 or time_gap != 60: init_bems_list_time.append(i) pre_time = curr_time", "+= 4 # インペリ側で運転ONかつ暖房のときは+4℃アップ制御 def convesion_airconditioning_data(self): for column,data in self.df.iteritems(): if 'C5F 事務室南", "== 1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time = df.index[i]", "columns_list.append('温度取り_' + column) self.df.columns = columns_list def create_conversion_data(self): print(\"Outputing formatted input data...\") print(\"-------------------------------------------------------\")", "'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,False) def create_conversion_data(self): df_bems_control_list = [] self.df = self.df.loc[:,~self.df.columns.str.contains('ロスナイ|省エネ')] self.convesion_airconditioning_data() for", "DataFile: # 1日分のデータが格納された辞書 data_files = {} def __init__(self,df,filepath,outpath,kind): # 1ファイルの内容 self.df = df", "self.df = df # 入力ファイルパス self.filename = filepath # 出力先パス self.output_dir = outpath", "]) def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if month <", "\"運転モード1\", \"風速1\", \"吸込温度2\", \"設定温度2\", \"運転モード2\", \"風速2\", \"吸込温度3\", \"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", 
\"設定温度4\", \"運転モード4\",", "self.df.loc[:,self.df.columns.str.contains('{}|信号名称|外気温'.format(floor))] # 5Fのカラムのみ抽出 df_bems = df_C5F.loc[:,df_C5F.columns.str.contains('吸込温度|設定温度|_運転|省エネレベル|運転モード|信号名称|風速|外気温')] # 5Fの中の特定のカラムのみ抽出 df_bems = df_bems.loc[:,df_bems.columns.str.contains('中|南|東|信号名称|省エネレベル|外気温')] # 抽出した中でもさらに絞り込み", "if len(one_day_df) > 1: output_path_folder = self.create_result_folder(month,day) df_bems_control = one_day_df.loc[:,one_day_df.columns.str.contains('設定温度|運転モード|風速')] # 制御ファイル df_bems_init", "< 10: month = \"0\" + str(month) if day < 10: day =", "PACG_風速_2\":\"風速3\", \"C5F 事務室南ペリ PACG_吸込温度\":\"吸込温度4\", \"C5F 事務室南ペリ PACG_設定温度\":\"設定温度4\", \"C5F 事務室南ペリ PACG_運転モード\":\"運転モード4\", \"C5F 事務室南ペリ PACG_風速\":\"風速4\",", "\"外気温\", ]) def create_result_folder(self,month,day): print(\"Creating a data output destination directory.........\") print(\"-------------------------------------------------------\") if month", "self.df.loc[(self.df[one+'運転']==1) & (self.df['C館 5F G50_省エネレベル'] == 1),one+'運転モード'] = 1 # 運転状態が1で省エネレベルが1の場合は冷房(1) else: #", "create_conversion_file(self,output,data,index): def conversion_index(df): df.index = list(map(lambda x:x[11:16],df.index.astype(str))) df.index.name = '時間' return df print(output)", "& (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0) &", "= { 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df =", "事務室中ペリ PACG_風速\":\"風速0\", \"C5F 事務室中ペリ PACG_吸込温度_2\":\"吸込温度1\", \"C5F 事務室中ペリ PACG_設定温度_2\":\"設定温度1\", \"C5F 事務室中ペリ PACG_運転モード_2\":\"運転モード1\", \"C5F 事務室中ペリ", "0) & (self.df['運転モード2'] == 0) & (self.df['運転モード3'] == 0) & (self.df['運転モード4'] == 0)", "inplace=True) self.df = self.df.reindex(columns=[ \"吸込温度0\", \"設定温度0\", \"運転モード0\", \"風速0\", \"吸込温度1\", \"設定温度1\", \"運転モード1\", \"風速1\", \"吸込温度2\",", "PACG_' in column: 
self.df.insert(self.df.columns.get_loc(column)+1,column+'_2',data) self.df.insert(self.df.columns.get_loc(column)+2,column+'_3',data) elif 'C5F 事務室東南 PAC_' not in column and", "\"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\", \"C5F 事務室南 PACG_運転モード_2\":\"運転モード7\", \"C5F 事務室南 PACG_風速_2\":\"風速7\", \"C5F 事務室南 PACG_吸込温度_3\":\"吸込温度8\", \"C5F", "{ 'control':df_bems_control, 'init_bems':df_bems_init, 'evaluation':df_bems_eval } self.create_conversion_file(output_path_folder,result_data,True) class MeasureDataFile(DataFile): def remove_null_data(self): self.df = self.df.drop('@date()',axis=1).dropna(how='all')", "\"設定温度3\", \"運転モード3\", \"風速3\", \"吸込温度4\", \"設定温度4\", \"運転モード4\", \"風速4\", \"吸込温度5\", \"設定温度5\", \"運転モード5\", \"風速5\", \"吸込温度6\", \"設定温度6\",", "事務室南 PACG_吸込温度\":\"吸込温度6\", \"C5F 事務室南 PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南", "or (one == 'C5F 事務室南 PACG_'): # 冬季のインペリ側 self.df.loc[(self.df[one+'運転']==1) & (self.df[one+'運転モード'] == 2),one+'吸込温度']", "PACG_設定温度\":\"設定温度6\", \"C5F 事務室南 PACG_運転モード\":\"運転モード6\", \"C5F 事務室南 PACG_風速\":\"風速6\", \"C5F 事務室南 PACG_吸込温度_2\":\"吸込温度7\", \"C5F 事務室南 PACG_設定温度_2\":\"設定温度7\",", "0 # 運連状態が0なら電源OFF(0) self.df.loc[(self.df[one+'運転']==1) & ((self.df['C館 5F G50_省エネレベル'] == 2) | (self.df['C館 5F", "input data...\") print(\"-------------------------------------------------------\") def create_no_operation_conversion_data(self): def formatted_no_operation_init_bems(df): init_bems_list_time = [] for i in", "if i == 1: pre_time = df.index[0] curr_time = df.index[1] init_bems_list_time.append(0) else: curr_time" ]
[ "containing optimizer state and encoder/decoder weights. Parameters ---------- path : str Path to", "decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load", "way. Only necessary for bad # hyperparam config such as optimizer learning rate", "/ len(data) logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx for callback", "self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type:", "for bad # hyperparam config such as optimizer learning rate # being large.", "optimizer : torch.optim.Optimizer Pytorch optimizer used to train model. loss_func : function Loss", "use with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture", "{:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer state and encoder/decoder", "callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data)", "of epochs to train for checkpoint : str Path to checkpoint file to", "saved. callbacks : list Contains molecules.utils.callback.Callback objects which are called during training. \"\"\"", "\"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader :", "implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and", "in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback", "space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. 
load_weights(enc_path, dec_path)", "member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams)", "function, optional Defines an optional loss function with inputs (recon_x, x, mu, logvar)", "True prints training and validation loss to stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape =", "Defines an optional loss function with inputs (recon_x, x, mu, logvar) and ouput", ": str Path to checkpoint file Returns ------- Epoch of training corresponding to", "latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path,", "add more to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data =", "= self.decoder(x) # TODO: see if we can remove this to speed things", "epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss =", "print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\"", "x, mu, logvar def encode(self, x): # mu layer return self.encoder.encode(x) def decode(self,", "VAE models. Takes arbitrary encoder/decoder models specified by the choice of hyperparameters. Assumes", "# being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def", "self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device)", "from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape,", "a batch of data with dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings", "Underlying Pytorch model with encoder/decoder attributes. 
optimizer : torch.optim.Optimizer Pytorch optimizer used to", "self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu,", "are called during training. logs : dict Filled with data for callbacks \"\"\"", "data for callbacks \"\"\" self.model.train() train_loss = 0. for batch_idx, data in enumerate(train_loader):", "data. Note: For use with SymmetricVAE use (1, num_residues, num_residues) For use with", "logs['global_step'] = epoch if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def", "will allow the ability to set the train/test # data to cuda as", "Loss function used to train model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[])", "of training callbacks : list Contains molecules.utils.callback.Callback objects which are called during training.", "= get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is None else loss def", "path : str Path to checkpoint file Returns ------- Epoch of training corresponding", "\"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters ----------", "logvar) and ouput torch loss. cuda : bool True specifies to use cuda", "data into the latent space. Parameters ---------- x : torch.Tensor Data to encode,", "from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class", ": list Contains molecules.utils.callback.Callback objects which are called during training. logs : dict", "data. model : torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes. 
optimizer :", "in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar =", "data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose:", "called during training. logs : dict Filled with data for callbacks \"\"\" self.model.train()", "Contains molecules.utils.callback.Callback objects which are called during training. \"\"\" if callbacks: logs =", "input_shape : tuple shape of incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch", "input_shape self.verbose = verbose # TODO: consider passing in device (this will allow", "encoder/decoder models specified by the choice of hyperparameters. Assumes the shape of the", "import torch from torch import nn from torch.nn import functional as F from", "train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data into the latent", "torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes. optimizer : torch.optim.Optimizer Pytorch optimizer", "x = self.decoder(x) # TODO: see if we can remove this to speed", "Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')", "during training. 
\"\"\" if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else: logs", "data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss", "optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape of incomming", "the ability to set the train/test # data to cuda as well, since", "epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training", "reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std", "Attributes ---------- input_shape : tuple shape of incomming data. model : torch.nn.Module (VAEModel)", "optimizer learning rate # being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x,", "def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader", "self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if", "def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu +", "def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self,", "loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer state and", "to load and resume training from the epoch when the checkpoint was saved.", "of training corresponding to the saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict'])", "hyperparameters. 
Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer", "for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train", "self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing", "training, testing and saving VAE models. Takes arbitrary encoder/decoder models specified by the", ": tuple shape of incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch model", "int Number of epochs to train for checkpoint : str Path to checkpoint", ": molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters. loss: : function, optional", "checkpoint file to load and resume training from the epoch when the checkpoint", "* sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1", "as optimizer learning rate # being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return", "isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder =", "hyperparameters. Assumes the shape of the data is square. Attributes ---------- input_shape :", "with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if", "str Path to checkpoint file to load and resume training from the epoch", "for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def _train(self,", "the decoder weights. 
\"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved", "TODO: consider making optimizer_hparams a member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08,", "+ logvar - mu.pow(2) - logvar.exp()) return BCE + KLD # TODO: set", "for data in valid_loader: data = data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss", "encoder weights. dec_path : str Path to save the decoder weights. \"\"\" self.model.save_weights(enc_path,", "\"\"\" if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else: logs = {}", "------- torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self,", "load and resume training from the epoch when the checkpoint was saved. callbacks", "torch.nn import functional as F from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams", "hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self,", "SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import", "save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def", "SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams,", "type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder", "Defines the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. 
optimizer_hparams :", "str Path to save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path,", "Loads checkpoint file containing optimizer state and encoder/decoder weights. Parameters ---------- path :", "optimizer type and corresponding hyperparameters. loss: : function, optional Defines an optional loss", "ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape,", "# RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc =", "= train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch,", "with inputs (recon_x, x, mu, logvar) and ouput torch loss. cuda : bool", "callbacks \"\"\" self.model.train() train_loss = 0. for batch_idx, data in enumerate(train_loader): if callbacks:", "square. Attributes ---------- input_shape : tuple shape of incomming data. model : torch.nn.Module", "loss. cuda : bool True specifies to use cuda if it is available.", "checkpoint : str Path to checkpoint file to load and resume training from", "else loss def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]):", "def encode(self, x): \"\"\" Embed data into the latent space. Parameters ---------- x", "sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 +", "initialization hparams class VAE: \"\"\" Provides high level interface for training, testing and", "ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters. loss: :", "= self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step()", "bad # hyperparam config such as optimizer learning rate # being large. 
#x", "torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLD # TODO:", "# TODO: consider making optimizer_hparams a member variable # RMSprop with lr=0.001, alpha=0.9,", "if loss is None else loss def __repr__(self): return str(self.model) def train(self, train_loader,", "x): mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x) #", "logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch,", "checkpoint was saved. callbacks : list Contains molecules.utils.callback.Callback objects which are called during", "well, since device will be a variable in the user space) self.device =", "Parameters ---------- input_shape : tuple shape of incomming data. Note: For use with", "train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader :", "batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item() / len(data))) train_loss", "valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains molecules.utils.callback.Callback objects which", "logvar def encode(self, x): # mu layer return self.encoder.encode(x) def decode(self, embedding): return", "= torch.randn_like(std) return mu + eps*std def forward(self, x): mu, logvar = self.encoder(x)", "1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch", "ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1", "inputs (recon_x, x, mu, logvar) and ouput torch loss. cuda : bool True", "data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number of epochs", "molecules.utils.callback.Callback objects which are called during training. 
logs : dict Filled with data", "decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters ---------- embedding : torch.Tensor Embedding", "= SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder", "to encode, could be a batch of data with dimension (batch-size, input_shape) Returns", "and validation loss to stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose =", "epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs)", "callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss", "def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer state and encoder/decoder weights.", "def forward(self, x): mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x =", "OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__()", "logs = {'model': self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch = 1", "train_loss = 0. for batch_idx, data in enumerate(train_loader): if callbacks: pass # TODO:", "raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps", "\"\"\" Generate matrices from embeddings. 
Parameters ---------- embedding : torch.Tensor Embedding data, could", "torch.Tensor Embedding data, could be a batch of data with dimension (batch-size, latent_dim)", "+ batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch:", "enumerate(train_loader): if callbacks: pass # TODO: add more to logs for callback in", "function with inputs (recon_x, x, mu, logvar) and ouput torch loss. cuda :", "def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by the type", "callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks:", "{} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx /", "training callbacks : list Contains molecules.utils.callback.Callback objects which are called during training. logs", "<reponame>hengma1001/molecules<filename>molecules/ml/unsupervised/vae/vae.py import torch from torch import nn from torch.nn import functional as F", "(VAEModel) Underlying Pytorch model with encoder/decoder attributes. optimizer : torch.optim.Optimizer Pytorch optimizer used", "True specifies to use cuda if it is available. False uses cpu. verbose", "None else loss def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='',", "print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. *", "SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self,", "def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction + KL divergence losses", "train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model on validation set. 
Parameters", "+= self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs +", "model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines", "logs : dict Filled with data for callbacks \"\"\" self.model.train() train_loss = 0.", ".symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module):", "['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models", "- 1) * len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs)", "Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model on", "= self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks:", "hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std =", "an inplace way. Only necessary for bad # hyperparam config such as optimizer", "from torch import nn from torch.nn import functional as F from .resnet import", "objects which are called during training. logs : dict Filled with data for", "logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\"", "for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch :", "(batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights.", "Parameters ---------- enc_path : str Path to save the encoder weights. 
dec_path :", "+ KL divergence losses summed over all elements and batch See Appendix B", "dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu,", "enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction", ": torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes. optimizer : torch.optim.Optimizer Pytorch", "and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters. loss:", "= data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu,", "len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step']", "the encoder weights. dec_path : str Path to save the decoder weights. \"\"\"", "------- torch.Tensor of generated matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def", "models by the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import", "1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks,", "self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters ---------- embedding :", ": torch.Tensor Data to encode, could be a batch of data with dimension", "are SymmetricVAEHyperparams and ResnetVAEHyperparams. 
optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding", "2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 +", "* batch_idx / len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss']", "---------- input_shape : tuple shape of incomming data. Note: For use with SymmetricVAE", "hyperparam config such as optimizer learning rate # being large. #x = torch.where(torch.isnan(x),", "Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True):", "self.model.train() train_loss = 0. for batch_idx, data in enumerate(train_loader): if callbacks: pass #", "dict Filled with data for callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad():", "be a batch of data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of", "callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch =", "---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current epoch of", "torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed data into", "callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs):", "Filled with data for callbacks \"\"\" self.model.train() train_loss = 0. 
for batch_idx, data", "to cuda as well, since device will be a variable in the user", "self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1):", "shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from", "Defines the optimizer type and corresponding hyperparameters. loss: : function, optional Defines an", "optimizer_hparams) self.loss_fnc = vae_loss if loss is None else loss def __repr__(self): return", "the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape,", "Test model on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data", "if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file", "list Contains molecules.utils.callback.Callback objects which are called during training. \"\"\" if callbacks: logs", "x : torch.Tensor Data to encode, could be a batch of data with", "of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save", "weights. dec_path : str Path to save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path)", "more to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device)", "torch from torch import nn from torch.nn import functional as F from .resnet", ": molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams.", "def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model on validation set. 
Parameters ----------", "consider passing in device (this will allow the ability to set the train/test", "/ len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss", "str Path to save the encoder weights. dec_path : str Path to save", "mu.pow(2) - logvar.exp()) return BCE + KLD # TODO: set weight initialization hparams", "# data to cuda as well, since device will be a variable in", "shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder", "type: {type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return", "this to speed things up # or find an inplace way. Only necessary", "= 0 with torch.no_grad(): for data in valid_loader: data = data.to(self.device) recon_batch, mu,", "Path to save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path):", "dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True,", "str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ----------", "an optional loss function with inputs (recon_x, x, mu, logvar) and ouput torch", "mu, logvar): \"\"\" Effects ------- Reconstruction + KL divergence losses summed over all", "in the user space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu')", "import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def", "Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. 
optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type", "embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate", "the optimizer type and corresponding hyperparameters. loss: : function, optional Defines an optional", "int Current epoch of training callbacks : list Contains molecules.utils.callback.Callback objects which are", "{:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model on validation set.", "if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self,", "def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects", "incomming data. Note: For use with SymmetricVAE use (1, num_residues, num_residues) For use", "/ len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch", "enc_path : str Path to save the encoder weights. dec_path : str Path", "std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def forward(self, x):", "objects which are called during training. \"\"\" if callbacks: logs = {'model': self.model,", "torch.optim.Optimizer Pytorch optimizer used to train model. loss_func : function Loss function used", ": bool True specifies to use cuda if it is available. False uses", "from embeddings. Parameters ---------- embedding : torch.Tensor Embedding data, could be a batch", "else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a member", "epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current", "remove this to speed things up # or find an inplace way. 
Only", "- sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())", "self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ----------", "super(VAEModel, self).__init__() # Select encoder/decoder models by the type of the hparams if", "mu, logvar) and ouput torch loss. cuda : bool True specifies to use", "self.decoder(x) # TODO: see if we can remove this to speed things up", "self.optimizer} else: logs = {} start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint)", "callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1): for callback in callbacks:", "torch import nn from torch.nn import functional as F from .resnet import ResnetVAEHyperparams", "_load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer state and encoder/decoder weights. Parameters", "set weight initialization hparams class VAE: \"\"\" Provides high level interface for training,", "len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset)", "is None else loss def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1,", "choice of hyperparameters. Assumes the shape of the data is square. Attributes ----------", "Epoch of training corresponding to the saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict'])", "matrices from embeddings. 
Parameters ---------- embedding : torch.Tensor Embedding data, could be a", "use with SymmetricVAE use (1, num_residues, num_residues) For use with ResnetVAE use (num_residues,", "input_shape) Returns ------- torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x)", "\"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ----------", "the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams", "embeddings. Parameters ---------- embedding : torch.Tensor Embedding data, could be a batch of", "ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams", "optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose # TODO: consider passing in device", "data to cuda as well, since device will be a variable in the", "in device (this will allow the ability to set the train/test # data", "callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for 1 epoch", "KLD # TODO: set weight initialization hparams class VAE: \"\"\" Provides high level", "optimizer state and encoder/decoder weights. Parameters ---------- path : str Path to checkpoint", "torch.randn_like(std) return mu + eps*std def forward(self, x): mu, logvar = self.encoder(x) x", "_validate(self, valid_loader, callbacks, logs): \"\"\" Test model on validation set. Parameters ---------- valid_loader", "Data to encode, could be a batch of data with dimension (batch-size, input_shape)", "with data for callbacks \"\"\" self.model.train() train_loss = 0. 
for batch_idx, data in", "\"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters", "verbose : bool True prints training and validation loss to stdout. \"\"\" hparams.validate()", "cuda : bool True specifies to use cuda if it is available. False", "len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if", "= verbose # TODO: consider passing in device (this will allow the ability", "latent_dim) Returns ------- torch.Tensor of generated matrices of shape (batch-size, input_shape) \"\"\" return", "hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and", "Contains molecules.utils.callback.Callback objects which are called during training. logs : dict Filled with", "self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed data into the latent space.", "incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes. optimizer", ": torch.optim.Optimizer Pytorch optimizer used to train model. loss_func : function Loss function", "Pytorch optimizer used to train model. loss_func : function Loss function used to", "validation loss to stdout. 
\"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose", "VAE: \"\"\" Provides high level interface for training, testing and saving VAE models.", "ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def", "decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is None else", "VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by the", "encode(x) Embed data into the latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path,", "a variable in the user space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available()", "logvar) x = self.decoder(x) # TODO: see if we can remove this to", "functional as F from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams", "hparams).to(self.device) # TODO: consider making optimizer_hparams a member variable # RMSprop with lr=0.001,", "callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx", "self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path :", "or find an inplace way. 
Only necessary for bad # hyperparam config such", "hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std)", "consider making optimizer_hparams a member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0", "torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number of epochs to train for", "self).__init__() # Select encoder/decoder models by the type of the hparams if isinstance(hparams,", "+ KLD # TODO: set weight initialization hparams class VAE: \"\"\" Provides high", "embedding : torch.Tensor Embedding data, could be a batch of data with dimension", "Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test", "weight initialization hparams class VAE: \"\"\" Provides high level interface for training, testing", "else: logs = {} start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for", "epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx *", "optimizer used to train model. 
loss_func : function Loss function used to train", "valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss:", ".resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else:", "to train for checkpoint : str Path to checkpoint file to load and", "+ eps*std def forward(self, x): mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar)", ": function, optional Defines an optional loss function with inputs (recon_x, x, mu,", "Effects ------- Reconstruction + KL divergence losses summed over all elements and batch", "callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch,", "1) * len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if", "range(start_epoch, epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks,", "logs): \"\"\" Test model on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains", "of the data is square. Attributes ---------- input_shape : tuple shape of incomming", "cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape of incomming data. Note:", "hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose # TODO: consider passing in", "are called during training. \"\"\" if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer}", "specified by the choice of hyperparameters. Assumes the shape of the data is", "mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x) # TODO:", "corresponding hyperparameters. 
loss: : function, optional Defines an optional loss function with inputs", "logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch: {} Average loss:", "if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder", ": dict Filled with data for callbacks \"\"\" self.model.train() train_loss = 0. for", "0. for batch_idx, data in enumerate(train_loader): if callbacks: pass # TODO: add more", "valid_loss = 0 with torch.no_grad(): for data in valid_loader: data = data.to(self.device) recon_batch,", "(1, num_residues, num_residues) For use with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams", "torch.zeros_like(x), x) return x, mu, logvar def encode(self, x): # mu layer return", "(epoch - 1) * len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch,", "from torch.nn import functional as F from .resnet import ResnetVAEHyperparams from .symmetric import", "corresponding to the saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return", "checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x):", "+ 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader,", ": list Contains molecules.utils.callback.Callback objects which are called during training. \"\"\" if callbacks:", "loss to stdout. 
\"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose #", "in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for 1", "to speed things up # or find an inplace way. Only necessary for", "all elements and batch See Appendix B from VAE paper: Kingma and Welling.", "recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /=", "callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss:", "of data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices of", "logvar): \"\"\" Effects ------- Reconstruction + KL divergence losses summed over all elements", "Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader", ".resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__", "training. \"\"\" if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else: logs =", "for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{}", "variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc", "Parameters ---------- path : str Path to checkpoint file Returns ------- Epoch of", "- mu.pow(2) - logvar.exp()) return BCE + KLD # TODO: set weight initialization", "load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects -------", "logvar.exp()) return BCE + KLD # TODO: set weight initialization hparams class VAE:", "embeddings. 
save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\"", "Parameters ---------- embedding : torch.Tensor Embedding data, could be a batch of data", "= ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.')", "str Path to checkpoint file Returns ------- Epoch of training corresponding to the", "be a variable in the user space) self.device = torch.device('cuda' if cuda and", "loss def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\"", "the epoch when the checkpoint was saved. callbacks : list Contains molecules.utils.callback.Callback objects", "data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item()", "{} start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks:", "self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch - 1)", "was saved. callbacks : list Contains molecules.utils.callback.Callback objects which are called during training.", "over all elements and batch See Appendix B from VAE paper: Kingma and", "(num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented are", "dec_path : str Path to save the decoder weights. 
\"\"\" self.model.save_weights(enc_path, dec_path) def", "x = self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see if we can", "callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks,", "loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] =", "(recon_x, x, mu, logvar) and ouput torch loss. cuda : bool True specifies", "allow the ability to set the train/test # data to cuda as well,", "self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path)", "SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams):", "if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch in", ": torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs :", "self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs):", "logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in", "Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x,", "latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters", "for callbacks \"\"\" self.model.train() train_loss = 0. 
for batch_idx, data in enumerate(train_loader): if", "dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\"", "for batch_idx, data in enumerate(train_loader): if callbacks: pass # TODO: add more to", "generated matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path):", "VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a member variable # RMSprop with", "loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch -", "if it is available. False uses cpu. verbose : bool True prints training", "import functional as F from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from", "self.model.eval() valid_loss = 0 with torch.no_grad(): for data in valid_loader: data = data.to(self.device)", "molecules.utils.callback.Callback objects which are called during training. \"\"\" if callbacks: logs = {'model':", "# TODO: set weight initialization hparams class VAE: \"\"\" Provides high level interface", "callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1): for callback", "\"\"\" self.model.train() train_loss = 0. for batch_idx, data in enumerate(train_loader): if callbacks: pass", "logs : dict Filled with data for callbacks \"\"\" self.model.eval() valid_loss = 0", "data for callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for data in", "Returns ------- Epoch of training corresponding to the saved checkpoint. \"\"\" cp =", "def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path : str", "= -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE +", "learning rate # being large. 
#x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu,", "+= self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss", "of hyperparameters. Assumes the shape of the data is square. Attributes ---------- input_shape", "Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number", "---------- input_shape : tuple shape of incomming data. model : torch.nn.Module (VAEModel) Underlying", "models specified by the choice of hyperparameters. Assumes the shape of the data", "from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams,", "losses summed over all elements and batch See Appendix B from VAE paper:", "# mu layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path,", "the latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights.", "train_loader, epoch, callbacks, logs): \"\"\" Train for 1 epoch Parameters ---------- train_loader :", "self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar):", "{type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu", "use cuda if it is available. False uses cpu. verbose : bool True", "callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for data in valid_loader: data", "of data with dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings of shape", "level interface for training, testing and saving VAE models. 
Takes arbitrary encoder/decoder models", "loss is None else loss def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader,", ": torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current epoch of training callbacks", "/= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss))", "can remove this to speed things up # or find an inplace way.", "of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices", "save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path : str Path", "logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def", "x, reduction='sum') # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD", "callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for", "---------- x : torch.Tensor Data to encode, could be a batch of data", "Only necessary for bad # hyperparam config such as optimizer learning rate #", "self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams)", "\"\"\" Test model on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation", "\"\"\" Embed data into the latent space. 
Parameters ---------- x : torch.Tensor Data", "batch of data with dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings of", "data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices of shape", "_train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for 1 epoch Parameters ---------- train_loader", "print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer", "\"\"\" Parameters ---------- input_shape : tuple shape of incomming data. Note: For use", "self.input_shape = input_shape self.verbose = verbose # TODO: consider passing in device (this", "loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks:", "as F from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import", "Contains validation data callbacks : list Contains molecules.utils.callback.Callback objects which are called during", "------- Reconstruction + KL divergence losses summed over all elements and batch See", "* torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLD #", "is available. False uses cpu. verbose : bool True prints training and validation", "decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path,", "epoch of training callbacks : list Contains molecules.utils.callback.Callback objects which are called during", "to the saved checkpoint. 
\"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch']", "user space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model =", "data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data,", "mu layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path):", "callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path):", "cp['epoch'] def encode(self, x): \"\"\" Embed data into the latent space. Parameters ----------", "train model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed", "file containing optimizer state and encoder/decoder weights. 
Parameters ---------- path : str Path", "self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] =", "https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 + log(sigma^2)", "of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder =", "get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is None else loss def __repr__(self):", "logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see", "def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape :", "of embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\"", "save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load", "For use with SymmetricVAE use (1, num_residues, num_residues) For use with ResnetVAE use", "being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def encode(self,", "self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\"", "if cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider", "set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains", "torch loss. cuda : bool True specifies to use cuda if it is", "* len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.item() / len(data))) train_loss /=", "the choice of hyperparameters. Assumes the shape of the data is square. Attributes", "hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams)", "# TODO: consider passing in device (this will allow the ability to set", "molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams):", "KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE", "large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def encode(self, x):", "eps*std def forward(self, x): mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x", "Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation", "weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ----------", "dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path : str Path to save", "callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for 1 epoch Parameters", "False uses cpu. verbose : bool True prints training and validation loss to", "training. logs : dict Filled with data for callbacks \"\"\" self.model.train() train_loss =", "start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs", "validation set. 
Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list", "return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters", "__all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select", "and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams", "cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed", "x, mu, logvar): \"\"\" Effects ------- Reconstruction + KL divergence losses summed over", "Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5", "dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self,", "Number of epochs to train for checkpoint : str Path to checkpoint file", "TODO: see if we can remove this to speed things up # or", "logs) for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\"", "batch_idx / len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] =", "the user space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model", "input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. 
Parameters", "we can remove this to speed things up # or find an inplace", "since device will be a variable in the user space) self.device = torch.device('cuda'", "epoch : int Current epoch of training callbacks : list Contains molecules.utils.callback.Callback objects", "uses cpu. verbose : bool True prints training and validation loss to stdout.", "during training. logs : dict Filled with data for callbacks \"\"\" self.model.train() train_loss", "the latent space. Parameters ---------- x : torch.Tensor Data to encode, could be", "self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss']", "if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset),", "function used to train model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train", "self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see if we", "if we can remove this to speed things up # or find an", "model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data", "embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path):", "{} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model", "in range(start_epoch, epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch,", "Pytorch model with encoder/decoder attributes. 
optimizer : torch.optim.Optimizer Pytorch optimizer used to train", "batch of data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices", "Returns ------- torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def", "---------- path : str Path to checkpoint file Returns ------- Epoch of training", "Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint file containing optimizer state", "* len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose:", "eps = torch.randn_like(std) return mu + eps*std def forward(self, x): mu, logvar =", "verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape of incomming data. Note: For", "training corresponding to the saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict'])", "sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return", "file to load and resume training from the epoch when the checkpoint was", "# hyperparam config such as optimizer learning rate # being large. #x =", "callbacks : list Contains molecules.utils.callback.Callback objects which are called during training. \"\"\" if", "it is available. False uses cpu. 
verbose : bool True prints training and", ": int Number of epochs to train for checkpoint : str Path to", "with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices of shape (batch-size,", "torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current epoch of training callbacks :", "with data for callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for data", "will be a variable in the user space) self.device = torch.device('cuda' if cuda", "Path to checkpoint file to load and resume training from the epoch when", "= 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for", "for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu,", "model on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks", "device (this will allow the ability to set the train/test # data to", "BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 + log(sigma^2) - mu^2", "matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder", "SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape,", "callbacks, logs): \"\"\" Train for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains", "def decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters ---------- embedding : torch.Tensor", "use (1, num_residues, num_residues) For use with ResnetVAE use (num_residues, num_residues) hparams :", "training and validation loss to stdout. 
\"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose", "num_residues, num_residues) For use with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines", "mu, logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def", "Load saved encoder/decoder weights. Parameters ---------- enc_path : str Path to save the", "= F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 + log(sigma^2) - mu^2 -", "SymmetricVAE use (1, num_residues, num_residues) For use with ResnetVAE use (num_residues, num_residues) hparams", "return cp['epoch'] def encode(self, x): \"\"\" Embed data into the latent space. Parameters", "torch.Tensor of generated matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self,", "lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss", "the train/test # data to cuda as well, since device will be a", "a batch of data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated", "Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape,", "with torch.no_grad(): for data in valid_loader: data = data.to(self.device) recon_batch, mu, logvar =", "self.loss_fnc = vae_loss if loss is None else loss def __repr__(self): return str(self.model)", ": torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains molecules.utils.callback.Callback objects which are", "inplace way. Only necessary for bad # hyperparam config such as optimizer learning", "x): # mu layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self,", "model with encoder/decoder attributes. 
optimizer : torch.optim.Optimizer Pytorch optimizer used to train model.", "testing and saving VAE models. Takes arbitrary encoder/decoder models specified by the choice", "and resume training from the epoch when the checkpoint was saved. callbacks :", "valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains", "callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback", ": int Current epoch of training callbacks : list Contains molecules.utils.callback.Callback objects which", "if callbacks: pass # TODO: add more to logs for callback in callbacks:", "logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch,", "logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch - 1) * len(train_loader) +", "weights. dec_path : str Path to save the decoder weights. \"\"\" self.model.load_weights(enc_path, dec_path)", "type and corresponding hyperparameters. loss: : function, optional Defines an optional loss function", "F from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams,", "space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape,", "---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains molecules.utils.callback.Callback objects", "making optimizer_hparams a member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer", "things up # or find an inplace way. 
Only necessary for bad #", "encode, could be a batch of data with dimension (batch-size, input_shape) Returns -------", "to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad()", "data epochs : int Number of epochs to train for checkpoint : str", "# TODO: add more to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs)", "to save the encoder weights. dec_path : str Path to save the decoder", "model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains", "(batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices of shape (batch-size, input_shape) \"\"\"", "Parameters ---------- x : torch.Tensor Data to encode, could be a batch of", "torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def encode(self, x): # mu layer", "Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved", ": tuple shape of incomming data. Note: For use with SymmetricVAE use (1,", "__repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model", "train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader", "the saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def", "Auto-Encoding Variational Bayes. 
ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') #", "matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\"", "to save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\"", "mu + eps*std def forward(self, x): mu, logvar = self.encoder(x) x = self.reparameterize(mu,", "len(data) logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx for callback in", "= (epoch - 1) * len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx,", "cuda if it is available. False uses cpu. verbose : bool True prints", "architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the", "self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100.", "to train model. loss_func : function Loss function used to train model. Methods", "valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] =", "the shape of the data is square. Attributes ---------- input_shape : tuple shape", "optional loss function with inputs (recon_x, x, mu, logvar) and ouput torch loss.", "def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path :", "[{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader),", "tuple shape of incomming data. 
Note: For use with SymmetricVAE use (1, num_residues,", "ability to set the train/test # data to cuda as well, since device", "logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item()", "loss function with inputs (recon_x, x, mu, logvar) and ouput torch loss. cuda", "data callbacks : list Contains molecules.utils.callback.Callback objects which are called during training. logs", "log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar -", "logvar): std = torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def forward(self,", "epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data into the latent space. decode(embedding)", "len(train_loader) + batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train", "weights. Parameters ---------- enc_path : str Path to save the encoder weights. dec_path", "{'model': self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch = 1 if checkpoint:", "= self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss']", "\"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape", "a member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model,", "vae_loss if loss is None else loss def __repr__(self): return str(self.model) def train(self,", "attributes. optimizer : torch.optim.Optimizer Pytorch optimizer used to train model. 
loss_func : function", "recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss", "self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if", "enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path : str Path to", "loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape of incomming data.", "device will be a variable in the user space) self.device = torch.device('cuda' if", "data epoch : int Current epoch of training callbacks : list Contains molecules.utils.callback.Callback", "loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks, logs): \"\"\" Test model on validation", "optional Defines an optional loss function with inputs (recon_x, x, mu, logvar) and", "Generate matrices from embeddings. Parameters ---------- embedding : torch.Tensor Embedding data, could be", "SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters.", "return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path)", "to checkpoint file Returns ------- Epoch of training corresponding to the saved checkpoint.", "hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by the type of the hparams", "self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch = 1 if checkpoint: start_epoch", "bool True specifies to use cuda if it is available. False uses cpu.", "loss.item() / len(data) logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx for", "Embed data into the latent space. 
Parameters ---------- x : torch.Tensor Data to", "# TODO: see if we can remove this to speed things up #", "is square. Attributes ---------- input_shape : tuple shape of incomming data. model :", "epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item() / len(data)))", "which are called during training. logs : dict Filled with data for callbacks", "def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def", "logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch,", "be a batch of data with dimension (batch-size, input_shape) Returns ------- torch.Tensor of", "return self.model.decode(embedding) def save_weights(self, enc_path, dec_path): \"\"\" Save encoder/decoder weights. Parameters ---------- enc_path", "logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset) if", "interface for training, testing and saving VAE models. Takes arbitrary encoder/decoder models specified", "variable in the user space) self.device = torch.device('cuda' if cuda and torch.cuda.is_available() else", "\"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for data in valid_loader: data =", "Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains molecules.utils.callback.Callback", "epoch in range(start_epoch, epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader,", "training from the epoch when the checkpoint was saved. 
callbacks : list Contains", "logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation", "epoch, callbacks, logs): \"\"\" Train for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader", "torch.Tensor Data to encode, could be a batch of data with dimension (batch-size,", "input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by the type of the", "loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step']", "enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path : str Path", "len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def", "= torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def encode(self, x): # mu", "bool True prints training and validation loss to stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape", "train for checkpoint : str Path to checkpoint file to load and resume", "valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads checkpoint", "hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape,", "torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding):", "See Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR,", ": torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number of epochs to train", "0 with torch.no_grad(): for data in valid_loader: data = data.to(self.device) recon_batch, mu, logvar", "Embed data into the latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path)", "alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is", "from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams)", "self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction + KL divergence", "import nn from torch.nn import functional as F from .resnet import ResnetVAEHyperparams from", "epoch when the checkpoint was saved. callbacks : list Contains molecules.utils.callback.Callback objects which", "num_residues) For use with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the", "TODO: set weight initialization hparams class VAE: \"\"\" Provides high level interface for", "(batch-size, input_shape) Returns ------- torch.Tensor of embeddings of shape (batch-size, latent_dim) \"\"\" return", "(batch-size, latent_dim) \"\"\" return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from embeddings.", "stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose # TODO: consider", "Train for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch", ": str Path to save the decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self,", "by the choice of hyperparameters. Assumes the shape of the data is square.", ": function Loss function used to train model. 
Methods ------- train(train_loader, valid_loader, epochs=1,", "latent space. Parameters ---------- x : torch.Tensor Data to encode, could be a", "embedding): \"\"\" Generate matrices from embeddings. Parameters ---------- embedding : torch.Tensor Embedding data,", "space. Parameters ---------- x : torch.Tensor Data to encode, could be a batch", "torch.no_grad(): for data in valid_loader: data = data.to(self.device) recon_batch, mu, logvar = self.model(data)", "cuda as well, since device will be a variable in the user space)", "valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data into the latent space.", "cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making", "---------- enc_path : str Path to save the encoder weights. dec_path : str", "hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape of", "= input_shape self.verbose = verbose # TODO: consider passing in device (this will", "\"\"\" Save encoder/decoder weights. Parameters ---------- enc_path : str Path to save the", "saved checkpoint. \"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self,", "data with dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings of shape (batch-size,", "checkpoint file Returns ------- Epoch of training corresponding to the saved checkpoint. 
\"\"\"", "return x, mu, logvar def encode(self, x): # mu layer return self.encoder.encode(x) def", "\"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose # TODO: consider passing", "callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch: {} Average", "dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path", "with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters.", "mu, logvar).item() valid_loss /= len(valid_loader.dataset) if callbacks: logs['valid_loss'] = valid_loss if self.verbose: print('====>", "For use with ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model", "logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx for callback in callbacks:", "and saving VAE models. Takes arbitrary encoder/decoder models specified by the choice of", "self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def", "optimizer_hparams : molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters. loss: : function,", "epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for", "encoder/decoder weights. Parameters ---------- enc_path : str Path to save the encoder weights.", "resume training from the epoch when the checkpoint was saved. callbacks : list", "arbitrary encoder/decoder models specified by the choice of hyperparameters. Assumes the shape of", "x, mu, logvar) and ouput torch loss. 
cuda : bool True specifies to", "= {'model': self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch = 1 if", "# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5", "if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader, callbacks,", "= valid_loss if self.verbose: print('====> Validation loss: {:.4f}'.format(valid_loss)) def _load_checkpoint(self, path): \"\"\" Loads", "Assumes the shape of the data is square. Attributes ---------- input_shape : tuple", "validation data epochs : int Number of epochs to train for checkpoint :", "divergence losses summed over all elements and batch See Appendix B from VAE", "encode(self, x): \"\"\" Embed data into the latent space. Parameters ---------- x :", "torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO:", "data, could be a batch of data with dimension (batch-size, latent_dim) Returns -------", "\"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 + log(sigma^2) -", "the data is square. Attributes ---------- input_shape : tuple shape of incomming data.", "x): \"\"\" Embed data into the latent space. Parameters ---------- x : torch.Tensor", "in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(", "mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward() train_loss +=", "with SymmetricVAE use (1, num_residues, num_residues) For use with ResnetVAE use (num_residues, num_residues)", "training. logs : dict Filled with data for callbacks \"\"\" self.model.eval() valid_loss =", "which are called during training. \"\"\" if callbacks: logs = {'model': self.model, 'optimizer':", "saved encoder/decoder weights. 
\"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\"", "__init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple", "enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x,", "Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data into", "to set the train/test # data to cuda as well, since device will", "({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item()", "import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ =", "train_loss /= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose:", "model : torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes. optimizer : torch.optim.Optimizer", "= loss.item() / len(data) logs['global_step'] = (epoch - 1) * len(train_loader) + batch_idx", "model. loss_func : function Loss function used to train model. Methods ------- train(train_loader,", "mu, logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() /", "logs): \"\"\" Train for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training", "find an inplace way. 
Only necessary for bad # hyperparam config such as", "return BCE + KLD # TODO: set weight initialization hparams class VAE: \"\"\"", "self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see if we can remove this", "- logvar.exp()) return BCE + KLD # TODO: set weight initialization hparams class", "self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction + KL", ": torch.Tensor Embedding data, could be a batch of data with dimension (batch-size,", "Embedding data, could be a batch of data with dimension (batch-size, latent_dim) Returns", "in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1): for callback in", "= self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see if we can remove", "train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] =", "checkpoint='', callbacks=[]) Train model encode(x) Embed data into the latent space. decode(embedding) Generate", "callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch,", "from .resnet import ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer", "/= len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose: print('====>", "data is square. Attributes ---------- input_shape : tuple shape of incomming data. model", "encoder weights. dec_path : str Path to save the decoder weights. \"\"\" self.model.load_weights(enc_path,", "self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar) loss.backward()", "ouput torch loss. 
cuda : bool True specifies to use cuda if it", "\"\"\" cp = torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\"", "len(train_loader.dataset), 100. * batch_idx / len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if", "tuple shape of incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch model with", "epochs to train for checkpoint : str Path to checkpoint file to load", "checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch,", "def __repr__(self): return str(self.model) def train(self, train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]): \"\"\" Train", "up # or find an inplace way. Only necessary for bad # hyperparam", "Path to checkpoint file Returns ------- Epoch of training corresponding to the saved", "Returns ------- torch.Tensor of generated matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding)", "Path to save the encoder weights. dec_path : str Path to save the", "------- Epoch of training corresponding to the saved checkpoint. \"\"\" cp = torch.load(path)", "VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE", "as well, since device will be a variable in the user space) self.device", "get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() #", "---------- embedding : torch.Tensor Embedding data, could be a batch of data with", "= torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def forward(self, x): mu,", "when the checkpoint was saved. 
callbacks : list Contains molecules.utils.callback.Callback objects which are", "passing in device (this will allow the ability to set the train/test #", "elements and batch See Appendix B from VAE paper: Kingma and Welling. Auto-Encoding", "self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed data into the latent", "hparams class VAE: \"\"\" Provides high level interface for training, testing and saving", "weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(),", "and corresponding hyperparameters. loss: : function, optional Defines an optional loss function with", ": bool True prints training and validation loss to stdout. \"\"\" hparams.validate() optimizer_hparams.validate()", "with dimension (batch-size, input_shape) Returns ------- torch.Tensor of embeddings of shape (batch-size, latent_dim)", "could be a batch of data with dimension (batch-size, latent_dim) Returns ------- torch.Tensor", "len(train_loader.dataset) if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch:", "KL divergence losses summed over all elements and batch See Appendix B from", "during training. logs : dict Filled with data for callbacks \"\"\" self.model.eval() valid_loss", "epochs : int Number of epochs to train for checkpoint : str Path", "Contains validation data epochs : int Number of epochs to train for checkpoint", "use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented", ": str Path to save the encoder weights. dec_path : str Path to", "to stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape self.verbose = verbose # TODO:", "weights. 
\"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights.", "class VAE: \"\"\" Provides high level interface for training, testing and saving VAE", "self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from", "return mu + eps*std def forward(self, x): mu, logvar = self.encoder(x) x =", "self.verbose = verbose # TODO: consider passing in device (this will allow the", "valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number of epochs to", "'optimizer': self.optimizer} else: logs = {} start_epoch = 1 if checkpoint: start_epoch +=", "config such as optimizer learning rate # being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x),", "training data epoch : int Current epoch of training callbacks : list Contains", "import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif", "------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x) Embed data into the", "B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114", "data in valid_loader: data = data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss +=", "Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 *", "= epoch if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self,", "model encode(x) Embed data into the latent space. 
decode(embedding) Generate matrices from embeddings.", "num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams", "and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE = F.binary_cross_entropy(recon_x, x,", "in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch,", "= ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder", "of incomming data. Note: For use with SymmetricVAE use (1, num_residues, num_residues) For", "molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. Currently implemented are SymmetricVAEHyperparams and ResnetVAEHyperparams. optimizer_hparams", "Save encoder/decoder weights. Parameters ---------- enc_path : str Path to save the encoder", "= data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item()", "TODO: add more to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data", "logvar) loss.backward() train_loss += loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data)", "could be a batch of data with dimension (batch-size, input_shape) Returns ------- torch.Tensor", "encoder/decoder attributes. optimizer : torch.optim.Optimizer Pytorch optimizer used to train model. loss_func :", "valid_loader, callbacks, logs): \"\"\" Test model on validation set. Parameters ---------- valid_loader :", "__init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by the type of", "\"\"\" Provides high level interface for training, testing and saving VAE models. 
Takes", "logs = {} start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback", "callbacks, logs): \"\"\" Test model on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader", ": str Path to checkpoint file to load and resume training from the", "self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is None else loss", "for callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for data in valid_loader:", "= {} start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in", "= self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x) # TODO: see if", "dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path : str Path to", "function Loss function used to train model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='',", "-0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) return BCE + KLD", "checkpoint='', callbacks=[]): \"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data", "molecules.ml.hyperparams.OptimizerHyperparams Defines the optimizer type and corresponding hyperparameters. loss: : function, optional Defines", "ResnetVAE use (num_residues, num_residues) hparams : molecules.ml.hyperparams.Hyperparams Defines the model architecture hyperparameters. 
Currently", "layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path)", "= SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet", "in enumerate(train_loader): if callbacks: pass # TODO: add more to logs for callback", "batch_idx, data in enumerate(train_loader): if callbacks: pass # TODO: add more to logs", "ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std", "data into the latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save", "set the train/test # data to cuda as well, since device will be", "import OptimizerHyperparams, get_optimizer __all__ = ['VAE'] class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel,", "saving VAE models. Takes arbitrary encoder/decoder models specified by the choice of hyperparameters.", "for callback in callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs)", "callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch - 1) * len(train_loader)", "validation data callbacks : list Contains molecules.utils.callback.Callback objects which are called during training.", "dict Filled with data for callbacks \"\"\" self.model.train() train_loss = 0. for batch_idx,", "train model. loss_func : function Loss function used to train model. Methods -------", "shape of incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder", "verbose # TODO: consider passing in device (this will allow the ability to", "used to train model. 
Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model", "see if we can remove this to speed things up # or find", "Current epoch of training callbacks : list Contains molecules.utils.callback.Callback objects which are called", "forward(self, x): mu, logvar = self.encoder(x) x = self.reparameterize(mu, logvar) x = self.decoder(x)", "# or find an inplace way. Only necessary for bad # hyperparam config", "self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a member variable #", "ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid", "nn from torch.nn import functional as F from .resnet import ResnetVAEHyperparams from .symmetric", "\"\"\" Effects ------- Reconstruction + KL divergence losses summed over all elements and", "Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014", "= 0. for batch_idx, data in enumerate(train_loader): if callbacks: pass # TODO: add", "batch_idx for callback in callbacks: callback.on_batch_end(batch_idx, epoch, logs) if self.verbose: print('Train Epoch: {}", "state and encoder/decoder weights. Parameters ---------- path : str Path to checkpoint file", "encoder/decoder models by the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric", "and ouput torch loss. 
cuda : bool True specifies to use cuda if", ".symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim)", "optimizer_hparams a member variable # RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer =", "pass # TODO: add more to logs for callback in callbacks: callback.on_batch_begin(batch_idx, epoch,", "else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar)", "1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch : int", "used to train model. loss_func : function Loss function used to train model.", "SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder =", "reduction='sum') # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD =", "into the latent space. Parameters ---------- x : torch.Tensor Data to encode, could", "into the latent space. decode(embedding) Generate matrices from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder", "train/test # data to cuda as well, since device will be a variable", "return self.model.encode(x) def decode(self, embedding): \"\"\" Generate matrices from embeddings. Parameters ---------- embedding", "prints training and validation loss to stdout. \"\"\" hparams.validate() optimizer_hparams.validate() self.input_shape = input_shape", "train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current epoch of training", "(this will allow the ability to set the train/test # data to cuda", "Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx", "torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs : int", "and encoder/decoder weights. Parameters ---------- path : str Path to checkpoint file Returns", "encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters", "rate # being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar", "callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs)", "torch.utils.data.dataloader.DataLoader Contains validation data callbacks : list Contains molecules.utils.callback.Callback objects which are called", "TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar): std = torch.exp(0.5*logvar) eps =", "the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d", "hparams) self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder,", "logvar - mu.pow(2) - logvar.exp()) return BCE + KLD # TODO: set weight", "of generated matrices of shape (batch-size, input_shape) \"\"\" return self.model.decode(embedding) def save_weights(self, enc_path,", "file Returns ------- Epoch of training corresponding to the saved checkpoint. \"\"\" cp", "x) return x, mu, logvar def encode(self, x): # mu layer return self.encoder.encode(x)", "if callbacks: logs['train_loss'] = train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch: {}", "speed things up # or find an inplace way. Only necessary for bad", "Takes arbitrary encoder/decoder models specified by the choice of hyperparameters. 
Assumes the shape", "= VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a member variable # RMSprop", "and batch See Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational", "save the encoder weights. dec_path : str Path to save the decoder weights.", "return self.decoder.decode(embedding) def save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path)", "Filled with data for callbacks \"\"\" self.model.eval() valid_loss = 0 with torch.no_grad(): for", "\"\"\" Train for 1 epoch Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data", "mu, logvar def encode(self, x): # mu layer return self.encoder.encode(x) def decode(self, embedding):", "save_weights(self, enc_path, dec_path): self.encoder.save_weights(enc_path) self.decoder.save_weights(dec_path) def load_weights(self, enc_path, dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x,", "for training, testing and saving VAE models. Takes arbitrary encoder/decoder models specified by", "from embeddings. save_weights(enc_path, dec_path) Save encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights.", "Contains training data epoch : int Current epoch of training callbacks : list", "Train model encode(x) Embed data into the latent space. 
decode(embedding) Generate matrices from", "# Select encoder/decoder models by the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams):", "def encode(self, x): # mu layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding)", "valid_loader: data = data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data,", "= torch.device('cuda' if cuda and torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) #", "such as optimizer learning rate # being large. #x = torch.where(torch.isnan(x), torch.zeros_like(x), x)", "for epoch in range(start_epoch, epochs + 1): for callback in callbacks: callback.on_epoch_begin(epoch, logs)", "elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder", "0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 *", "isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d, SymmetricDecoderConv2d self.encoder = SymmetricEncoderConv2d(input_shape, hparams) self.decoder =", "Select encoder/decoder models by the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from", "Reconstruction + KL divergence losses summed over all elements and batch See Appendix", "loss_func : function Loss function used to train model. Methods ------- train(train_loader, valid_loader,", "torch.cuda.is_available() else 'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a", "list Contains molecules.utils.callback.Callback objects which are called during training. 
logs : dict Filled", "epoch if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss)) def _validate(self, valid_loader,", "to checkpoint file to load and resume training from the epoch when the", "train_loss logs['global_step'] = epoch if self.verbose: print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss))", "called during training. logs : dict Filled with data for callbacks \"\"\" self.model.eval()", "models. Takes arbitrary encoder/decoder models specified by the choice of hyperparameters. Assumes the", "#x = torch.where(torch.isnan(x), torch.zeros_like(x), x) return x, mu, logvar def encode(self, x): #", "load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None,", "the checkpoint was saved. callbacks : list Contains molecules.utils.callback.Callback objects which are called", "self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs)", "\"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path : str Path to save", "from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\"", "necessary for bad # hyperparam config such as optimizer learning rate # being", "train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs", "+ log(sigma^2) - mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar", "RMSprop with lr=0.001, alpha=0.9, epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss", "callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for", "if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch - 1) *", "= torch.load(path) self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed data", "dimension (batch-size, latent_dim) Returns ------- torch.Tensor of generated matrices of shape (batch-size, input_shape)", "class VAEModel(nn.Module): def __init__(self, input_shape, hparams): super(VAEModel, self).__init__() # Select encoder/decoder models by", "decoder weights. \"\"\" self.model.save_weights(enc_path, dec_path) def load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder", ": dict Filled with data for callbacks \"\"\" self.model.eval() valid_loss = 0 with", "paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 https://arxiv.org/abs/1312.6114 \"\"\" BCE =", "callbacks : list Contains molecules.utils.callback.Callback objects which are called during training. logs :", "callbacks=[]) Train model encode(x) Embed data into the latent space. decode(embedding) Generate matrices", "hyperparameters. 
loss: : function, optional Defines an optional loss function with inputs (recon_x,", "mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu, logvar).item() valid_loss /= len(valid_loader.dataset)", "BCE + KLD # TODO: set weight initialization hparams class VAE: \"\"\" Provides", "cpu. verbose : bool True prints training and validation loss to stdout. \"\"\"", "called during training. \"\"\" if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else:", "on validation set. Parameters ---------- valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data callbacks :", "100. * batch_idx / len(train_loader), loss.item() / len(data))) train_loss /= len(train_loader.dataset) if callbacks:", "'cpu') self.model = VAEModel(input_shape, hparams).to(self.device) # TODO: consider making optimizer_hparams a member variable", "load_weights(self, enc_path, dec_path): \"\"\" Load saved encoder/decoder weights. Parameters ---------- enc_path : str", "saved encoder/decoder weights. Parameters ---------- enc_path : str Path to save the encoder", "Note: For use with SymmetricVAE use (1, num_residues, num_residues) For use with ResnetVAE", "callback in callbacks: callback.on_batch_begin(batch_idx, epoch, logs) data = data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar", "summed over all elements and batch See Appendix B from VAE paper: Kingma", "mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) -", "encoder/decoder weights. load_weights(enc_path, dec_path) Load saved encoder/decoder weights. \"\"\" def __init__(self, input_shape, hparams=SymmetricVAEHyperparams(),", "callbacks: callback.on_epoch_begin(epoch, logs) self._train(train_loader, epoch, callbacks, logs) self._validate(valid_loader, callbacks, logs) for callback in", "encoder/decoder weights. 
Parameters ---------- path : str Path to checkpoint file Returns -------", "for callback in callbacks: callback.on_train_begin(logs) for epoch in range(start_epoch, epochs + 1): for", "shape of incomming data. Note: For use with SymmetricVAE use (1, num_residues, num_residues)", "start_epoch = 1 if checkpoint: start_epoch += self._load_checkpoint(checkpoint) for callback in callbacks: callback.on_train_begin(logs)", "Provides high level interface for training, testing and saving VAE models. Takes arbitrary", "to train model. Methods ------- train(train_loader, valid_loader, epochs=1, checkpoint='', callbacks=[]) Train model encode(x)", "to use cuda if it is available. False uses cpu. verbose : bool", "= vae_loss if loss is None else loss def __repr__(self): return str(self.model) def", "callbacks=[]): \"\"\" Train model Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader", "from the epoch when the checkpoint was saved. callbacks : list Contains molecules.utils.callback.Callback", "dec_path): self.encoder.load_weights(enc_path) self.decoder.load_weights(dec_path) def vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction +", "weights. Parameters ---------- path : str Path to checkpoint file Returns ------- Epoch", "if callbacks: logs = {'model': self.model, 'optimizer': self.optimizer} else: logs = {} start_epoch", "training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data epochs : int Number of", "self.decoder = SymmetricDecoderConv2d(input_shape, hparams, self.encoder.encoder_dim) elif isinstance(hparams, ResnetVAEHyperparams): from .resnet import ResnetEncoder, ResnetDecoder", "self._validate(valid_loader, callbacks, logs) for callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks:", "high level interface for training, testing and saving VAE models. 
Takes arbitrary encoder/decoder", "checkpoint file containing optimizer state and encoder/decoder weights. Parameters ---------- path : str", "import ResnetEncoder, ResnetDecoder self.encoder = ResnetEncoder(input_shape, hparams) self.decoder = ResnetDecoder(input_shape, hparams) else: raise", "data.to(self.device) self.optimizer.zero_grad() recon_batch, mu, logvar = self.model(data) loss = self.loss_fnc(recon_batch, data, mu, logvar)", "callbacks: pass # TODO: add more to logs for callback in callbacks: callback.on_batch_begin(batch_idx,", "by the type of the hparams if isinstance(hparams, SymmetricVAEHyperparams): from .symmetric import SymmetricEncoderConv2d,", "\"\"\" Loads checkpoint file containing optimizer state and encoder/decoder weights. Parameters ---------- path", "callback in callbacks: callback.on_epoch_end(epoch, logs) for callback in callbacks: callback.on_train_end(logs) def _train(self, train_loader,", "in valid_loader: data = data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch,", "of incomming data. model : torch.nn.Module (VAEModel) Underlying Pytorch model with encoder/decoder attributes.", "Parameters ---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data epoch : int Current epoch", "path): \"\"\" Loads checkpoint file containing optimizer state and encoder/decoder weights. 
Parameters ----------", "torch.exp(0.5*logvar) eps = torch.randn_like(std) return mu + eps*std def forward(self, x): mu, logvar", "logs) if self.verbose: print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( epoch, batch_idx * len(data),", "def _train(self, train_loader, epoch, callbacks, logs): \"\"\" Train for 1 epoch Parameters ----------", "data in enumerate(train_loader): if callbacks: pass # TODO: add more to logs for", "loss: : function, optional Defines an optional loss function with inputs (recon_x, x,", "vae_loss(recon_x, x, mu, logvar): \"\"\" Effects ------- Reconstruction + KL divergence losses summed", "self.model.encoder.load_state_dict(cp['encoder_state_dict']) self.model.decoder.load_state_dict(cp['decoder_state_dict']) self.optimizer.load_state_dict(cp['optimizer_state_dict']) return cp['epoch'] def encode(self, x): \"\"\" Embed data into the", "shape of the data is square. Attributes ---------- input_shape : tuple shape of", "specifies to use cuda if it is available. False uses cpu. verbose :", "data = data.to(self.device) recon_batch, mu, logvar = self.model(data) valid_loss += self.loss_fnc(recon_batch, data, mu,", "available. False uses cpu. verbose : bool True prints training and validation loss", "for checkpoint : str Path to checkpoint file to load and resume training", "+= loss.item() self.optimizer.step() if callbacks: logs['train_loss'] = loss.item() / len(data) logs['global_step'] = (epoch", "{:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), loss.item() /", "= ResnetDecoder(input_shape, hparams) else: raise TypeError(f'Invalid hparams type: {type(hparams)}.') def reparameterize(self, mu, logvar):", "ResnetVAEHyperparams from .symmetric import SymmetricVAEHyperparams from molecules.ml.hyperparams import OptimizerHyperparams, get_optimizer __all__ = ['VAE']", "- mu^2 - sigma^2) KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2)", "input_shape : tuple shape of incomming data. Note: For use with SymmetricVAE use", "encode(self, x): # mu layer return self.encoder.encode(x) def decode(self, embedding): return self.decoder.decode(embedding) def", "F.binary_cross_entropy(recon_x, x, reduction='sum') # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)", "epsilon=1e-08, decay=0.0 self.optimizer = get_optimizer(self.model, optimizer_hparams) self.loss_fnc = vae_loss if loss is None", "TODO: consider passing in device (this will allow the ability to set the", "batch See Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes.", "---------- train_loader : torch.utils.data.dataloader.DataLoader Contains training data valid_loader : torch.utils.data.dataloader.DataLoader Contains validation data", "input_shape, hparams=SymmetricVAEHyperparams(), optimizer_hparams=OptimizerHyperparams(), loss=None, cuda=True, verbose=True): \"\"\" Parameters ---------- input_shape : tuple shape", "with encoder/decoder attributes. optimizer : torch.optim.Optimizer Pytorch optimizer used to train model. loss_func" ]
[ "w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый',", "small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert", "(sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000,", "small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2", "from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x,", "w, sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier =", "params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)),", "dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)),", "import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def 
cosine_sim(x, y): return", "word1 = 'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model", "'burger', 'car', 'tree', 'city'], [1, 1, 1, 0, 0, 0] ) assert (classifier.predict(['jet',", "= 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params) assert", "ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1) assert word2 in {w for", "for w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) >", "sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL =", "out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'),", "word2 in {w for w, sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load(", "def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit(", "= method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2", "assert word2 in {w for w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1])", "LogisticRegression() ).fit( ['banana', 'soup', 'burger', 
'car', 'tree', 'city'], [1, 1, 1, 0, 0,", "('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name)", "model_name) out = ft.most_similar(word1) assert word2 in {w for w, sim in out}", "out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white',", "def cosine_sim(x, y): return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5", "BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x * y) / (sum(x**2) *", "= small_model.most_similar(word1) assert word2 in {w for w, sim in out1} small_model.save('tmp_small.bin') small_model2", "import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import", "assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2 in {w for", "dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый' big_ft =", "== pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white',", "y): return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params',", "assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black',", "**params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2 in {w", "make_pipeline( 
FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1, 1, 1,", "new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ])", "+ model_name) out = ft.most_similar(word1) assert word2 in {w for w, sim in", "dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)),", "test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 =", "'soup', 'burger', 'car', 'tree', 'city'], [1, 1, 1, 0, 0, 0] ) assert", "0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0, 1, 1]).all()", "pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black',", "y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq,", "sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный',", "(compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый' big_ft", "def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) 
out = ft.most_similar(word1) assert", "import gensim import pytest import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import", "['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1, 1, 1, 0, 0, 0] )", "for w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [", "{w for w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name',", "* y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)),", "('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft =", "compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer", "gensim import pytest import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression", "in {w for w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2,", "in {w for w, sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin'", "cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2 in {w for w,", "FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1, 1, 1, 0,", "method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2 in", "= compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = 
ft.most_similar(word1) assert word2 in {w for w,", "ft.most_similar(word1) assert word2 in {w for w, sim in out} def test_sklearn_wrapper(): small_model", "= small_model2.most_similar(word1) assert word2 in {w for w, sim in out2} assert out1[0][1]", "in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'),", "(compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft,", "model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1) assert word2 in {w", "@pytest.mark.parametrize('word1, word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'),", "@pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000,", "word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1) assert word2 in", ").fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1, 1, 1, 0, 0, 0]", "[ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1,", 
"word2 in {w for w, sim in out2} assert out1[0][1] == pytest.approx(out2[0][1]) @pytest.mark.parametrize('word1,", "'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out", "cosine_sim(x, y): return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method,", "= big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 =", "big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1])", "FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x", "'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x * y) / (sum(x**2) * sum(y**2)) **", "1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0,", "new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 =", "import os import gensim import pytest import compress_fasttext from sklearn.pipeline import make_pipeline from", "in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model),", "from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = 
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL", "small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2 in {w for w, sim", "gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75", "word2 in {w for w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert", "small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup',", "'car', 'tree', 'city'], [1, 1, 1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train',", "'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft", "test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1) assert word2", "compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2 in {w", "[1, 1, 1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple']) ==", "in {w for w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0,", "'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' 
def cosine_sim(x, y): return sum(x * y) / (sum(x**2)", "out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression()", "os import gensim import pytest import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model", "from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE", "test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana',", "make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin')", "word2, model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ])", "new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def", "]) def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1)", "new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), 
(compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1", "'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL", "assert word2 in {w for w, sim in out} def test_sklearn_wrapper(): small_model =", "'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0,", "assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2 in {w for", "params): word1 = 'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1]", "(compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method,", "1, 1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0,", "/ (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False,", "sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq,", "sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE =", "out1 = small_model.most_similar(word1) assert word2 in {w for 
w, sim in out1} small_model.save('tmp_small.bin')", "model_name', [ ('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def", "'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out =", "= compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger',", "= gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) >", "small_model2.most_similar(word1) assert word2 in {w for w, sim in out2} assert out1[0][1] ==", "'city'], [1, 1, 1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple'])", "dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий' word2 =", "def test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0", "0.75 out2 = small_model2.most_similar(word1) assert word2 in {w for w, sim in out2}", "compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name) out = ft.most_similar(word1) assert word2 in {w for w, sim", "big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1)", "small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2 in {w for w, sim", 
"'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree',", "(compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000,", "* sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)),", "out2 = small_model2.most_similar(word1) assert word2 in {w for w, sim in out2} assert", ") classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'],", "= os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x * y)", "sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2", "= 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x * y) / (sum(x**2) * sum(y**2))", "'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft,", "cosine_sim(vec0, small_model[word1]) > 0.75 out1 = small_model.most_similar(word1) assert word2 in {w for w,", "{w for w, sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 
'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' )", "vec0 = big_ft[word1] small_model = method(big_ft, **params) assert cosine_sim(vec0, small_model[word1]) > 0.75 out1", "w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75", "dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params):", "sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft,", "for w, sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier", "{w for w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1])", "out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1)", "return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5 @pytest.mark.parametrize('method, params', [", "assert word2 in {w for w, sim in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin')", "LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 
'https://github.com/avidale/compress-fasttext/releases/download/' def", "compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car',", "in out1} small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 =", "= ft.most_similar(word1) assert word2 in {w for w, sim in out} def test_sklearn_wrapper():", "[ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)), (compress_fasttext.prune_ft,", "os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x * y) /", "> 0.75 out2 = small_model2.most_similar(word1) assert word2 in {w for w, sim in", "= compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert word2 in", "out = ft.most_similar(word1) assert word2 in {w for w, sim in out} def", "word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model = method(big_ft, **params)", "> 0.75 out1 = small_model.most_similar(word1) assert word2 in {w for w, sim in", "'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name): ft = 
compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL +", "0.75 out1 = small_model.most_similar(word1) assert word2 in {w for w, sim in out1}", "'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2, model_name):", "'tree', 'city'], [1, 1, 1, 0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake',", "new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый'", "0, 0, 0] ) assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0, 1,", "= make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1, 1,", "small_model.most_similar(word1) assert word2 in {w for w, sim in out1} small_model.save('tmp_small.bin') small_model2 =", "(compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий' word2", "import pytest import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from", "BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y): return sum(x *", "small_model.save('tmp_small.bin') small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin') assert cosine_sim(vec0, small_model2[word1]) > 0.75 out2 = small_model2.most_similar(word1) assert", "= 'синий' word2 = 'белый' big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE) vec0 = big_ft[word1] small_model =", 
"('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'), ('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'), ('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'), ]) def test_loading_existing_models(word1, word2,", "import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)),", "sim in out} def test_sklearn_wrapper(): small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load( 'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin' ) classifier = make_pipeline(", "import LogisticRegression from compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/'", "** 0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True,", "classifier = make_pipeline( FastTextTransformer(model=small_model), LogisticRegression() ).fit( ['banana', 'soup', 'burger', 'car', 'tree', 'city'], [1,", "0.5 @pytest.mark.parametrize('method, params', [ (compress_fasttext.quantize_ft, dict(qdim=32)), (compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000,", "qdim=16)), (compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)), (compress_fasttext.svd_ft, dict(n_components=32)), ]) def test_prune_save_load(method, params): word1 = 'синий'", "]) def test_prune_save_load(method, params): word1 = 'синий' word2 = 'белый' 
big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE)", "compress_fasttext.feature_extraction import FastTextTransformer BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin') BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/' def cosine_sim(x, y):", "pytest import compress_fasttext from sklearn.pipeline import make_pipeline from sklearn.linear_model import LogisticRegression from compress_fasttext.feature_extraction" ]
[ "print 'ODD NEGATIVE' else: if x % 2 == 0: print 'EVEN POSITIVE'", "x < 0: if x % 2 == 0: print 'EVEN NEGATIVE' else:", "if x % 2 == 0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE'", "0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if x % 2", "% 2 == 0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if", "-*- coding: utf-8 -*- for i in range(int(raw_input())): x = int(raw_input()) if x", "x % 2 == 0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else:", "in range(int(raw_input())): x = int(raw_input()) if x == 0: print 'NULL' elif x", "i in range(int(raw_input())): x = int(raw_input()) if x == 0: print 'NULL' elif", "for i in range(int(raw_input())): x = int(raw_input()) if x == 0: print 'NULL'", "'NULL' elif x < 0: if x % 2 == 0: print 'EVEN", "else: if x % 2 == 0: print 'EVEN POSITIVE' else: print 'ODD", "print 'NULL' elif x < 0: if x % 2 == 0: print", "'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if x % 2 == 0:", "elif x < 0: if x % 2 == 0: print 'EVEN NEGATIVE'", "# -*- coding: utf-8 -*- for i in range(int(raw_input())): x = int(raw_input()) if", "= int(raw_input()) if x == 0: print 'NULL' elif x < 0: if", "== 0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if x %", "print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if x % 2 ==", "else: print 'ODD NEGATIVE' else: if x % 2 == 0: print 'EVEN", "if x % 2 == 0: print 'EVEN POSITIVE' else: print 'ODD POSITIVE'", "'ODD NEGATIVE' else: if x % 2 == 0: print 'EVEN POSITIVE' else:", "int(raw_input()) if x == 0: print 'NULL' elif x < 0: if x", "range(int(raw_input())): x = int(raw_input()) if x == 0: print 'NULL' elif x <", "0: print 'NULL' elif x < 0: if x % 2 == 0:", "0: if x % 2 == 0: print 'EVEN NEGATIVE' else: print 'ODD", "utf-8 -*- for i in range(int(raw_input())): x = int(raw_input()) if x == 0:", "== 0: print 'NULL' elif x < 0: if x % 2 ==", "x = int(raw_input()) if x == 0: print 'NULL' elif x < 0:", "2 == 0: print 'EVEN NEGATIVE' else: print 'ODD NEGATIVE' else: if 
x", "if x == 0: print 'NULL' elif x < 0: if x %", "-*- for i in range(int(raw_input())): x = int(raw_input()) if x == 0: print", "NEGATIVE' else: print 'ODD NEGATIVE' else: if x % 2 == 0: print", "coding: utf-8 -*- for i in range(int(raw_input())): x = int(raw_input()) if x ==", "x == 0: print 'NULL' elif x < 0: if x % 2", "NEGATIVE' else: if x % 2 == 0: print 'EVEN POSITIVE' else: print", "< 0: if x % 2 == 0: print 'EVEN NEGATIVE' else: print" ]
[ "print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs))", "'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n',", "'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n',", "'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n',", "'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n',", "= ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n',", "'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines)) exp_lines =", "yaml from pyreqgen.ReqParser import * class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as", "exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines) if __name__ == \"__main__\": unittest.main()", "= yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+')", "Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f:", "as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n',", 
"'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n']", "ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines", "= set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines)", "* class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f,", "with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines =", "req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n',", "set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines) if", "'re\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x:", "'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n',", "open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs", "def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files", "lines = set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines,", "'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 
'matplotlib\\n',", "yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as", "print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n',", "from pyreqgen.ReqParser import * class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f:", "req_f: lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n',", "import unittest, yaml from pyreqgen.ReqParser import * class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\",", "x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines) if __name__ ==", "'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines))", "'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort())", "'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n',", "class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader)", "ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\")", "import * class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs =", "'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = 
set(map(lambda x: x.strip(), lines)) exp_lines", "with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs)", "'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines", "x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines) if __name__", "'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n',", "['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n',", "'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs =", "reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as", "'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n',", "'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n',", "config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with", "'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n',", "'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x: x.strip(),", "\"r\") as req_f: lines = req_f.readlines() 
print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n',", "lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n',", "'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda x:", "as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files)", "req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines =", "configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\",", "'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines =", "unittest, yaml from pyreqgen.ReqParser import * class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+')", "= ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\",", "open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines()", "'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines)) exp_lines = set(map(lambda", "open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n',", "py_files = ReqParser.__get_py_files(configs) reqs = ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with", "pyreqgen.ReqParser import * 
class TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs", "lines)) exp_lines = set(map(lambda x: x.strip(), exp_lines)) self.assertEqual(lines, exp_lines) if __name__ == \"__main__\":", "with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n',", "as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines", "'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n',", "TestParser(unittest.TestCase): def test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs)", "req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f: lines = req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n',", "'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n',", "'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(),", "exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n', 'gc\\n', 'itertools\\n', 'joblib\\n',", "= req_f.readlines() print(lines.sort()) exp_lines = ['IPython\\n', 'altair\\n', 'bayes_opt\\n', 'catboost\\n', 'category_encoders\\n', 'collections\\n', 'datetime\\n', 'eli5\\n',", "test_files(self): with open(\"../config.yaml\", 'r+') as config_f: configs = yaml.load(config_f, Loader=yaml.FullLoader) print(configs) py_files =", "'pandas\\n', 're\\n', 'seaborn\\n', 'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 
'xgboost\\n'] lines = set(map(lambda", "'gc\\n', 'itertools\\n', 'joblib\\n', 'json\\n', 'lightgbm\\n', 'matplotlib\\n', 'networkx\\n', 'numba\\n', 'numpy\\n', 'os\\n', 'pandas\\n', 're\\n', 'seaborn\\n',", "= ReqParser.__get_reqs(py_files) with open(\"requirements.txt\", 'w+') as req_f: req_f.write('\\n'.join(reqs)) with open(\"requirements.txt\", \"r\") as req_f:", "'shap\\n', 'sklearn\\n', 'time\\n', 'tqdm\\n', 'typing\\n', 'warnings\\n', 'xgboost\\n'] lines = set(map(lambda x: x.strip(), lines))" ]
[ "None otherwise. \"\"\" action_name = action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction,", "os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml", "import os import sys import xml.etree.ElementTree as xml from ..Helpers import path_helper from", "return self.contents != None; def actionLookup(self, action_name): \"\"\" This method returns the method", "def __eq__(self, other): return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path ==", "type, otherwise None. \"\"\" action = None; if self.isValid(): action = filter(lambda action:", "''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents", "import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path):", "%s)' % (type(self), self.name, self.path); else: return '(%s : INVALID OBJECT)' % (type(self));", "Returns the 'profile' action for this scheme. \"\"\" action = None; if self.isValid():", "'analyze' action for this scheme. \"\"\" action = None; if self.isValid(): action =", "action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns the 'test' action", "and self.name == other.name and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def", "file!'); def __repr__(self): if self.isValid(): return '(%s : %s : %s)' % (type(self),", "the passed action type, otherwise None. 
\"\"\" action = None; if self.isValid(): action", "other): return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path; def", "if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns the", "This method returns the method for the passed action type, None otherwise. \"\"\"", "buildAction(self, container): \"\"\" Returns the 'build' action for this scheme. \"\"\" action =", "def testAction(self, container): \"\"\" Returns the 'test' action for this scheme. \"\"\" action", "\"%s\" is not an xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does", "def actionLookup(self, action_name): \"\"\" This method returns the method for the passed action", "scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]:", "scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file", "self.shared = False; self.container = ''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0];", "action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns the 'build'", "analyzeAction(self, container): \"\"\" Returns the 'analyze' action for this scheme. 
\"\"\" action =", "path): self.shared = False; self.container = ''; self.path = path_helper(path, ''); self.name =", "__hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents != None; def actionLookup(self, action_name): \"\"\"", "None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self,", "'archive': self.archiveAction }; action = None; if action_name in lookup.keys(): action = lookup[action_name];", "action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns the", "BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns the 'launch' action for this", "scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml =", "file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path); return", "not exist!' % dir_path); return schemes; class xcscheme(object): def __init__(self, path): self.shared =", "'(%s : %s : %s)' % (type(self), self.name, self.path); else: return '(%s :", "self.isValid(): action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self,", "action; def analyzeAction(self, container): \"\"\" Returns the 'analyze' action for this scheme. \"\"\"", "action for this scheme. 
\"\"\" action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction'));", "in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path):", "from ..Helpers import path_helper from ..Helpers import xcrun_helper from ..Helpers import logging_helper from", "os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not", "action; def profileAction(self, container): \"\"\" Returns the 'profile' action for this scheme. \"\"\"", "profileAction(self, container): \"\"\" Returns the 'profile' action for this scheme. \"\"\" action =", "actionLookup(self, action_name): \"\"\" This method returns the method for the passed action type,", "action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action;", "the 'analyze' action for this scheme. \"\"\" action = None; if self.isValid(): action", "otherwise None. \"\"\" action = None; if self.isValid(): action = filter(lambda action: action.tag", "None; if self.isValid(): action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action;", "self.path); def __eq__(self, other): return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path", "\"\"\" Returns the 'analyze' action for this scheme. \"\"\" action = None; if", "None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme", "path does not exist!' % dir_path); return schemes; class xcscheme(object): def __init__(self, path):", "scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' % scheme_file_path); else:", "action for this scheme. 
\"\"\" action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction'));", "passed action type, None otherwise. \"\"\" action_name = action_name.lower(); lookup = { 'build':", "action; def launchAction(self, container): \"\"\" Returns the 'launch' action for this scheme. \"\"\"", "\"\"\" action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self,", "archiveAction(self, container): \"\"\" Returns the 'archive' action for this scheme. \"\"\" action =", "import xml.etree.ElementTree as xml from ..Helpers import path_helper from ..Helpers import xcrun_helper from", "xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path", "!= None; def actionLookup(self, action_name): \"\"\" This method returns the method for the", "def __attrs(self): return (self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme) and self.name", "if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container):", "def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return", "of xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s : %s : %s)'", "= None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of", "(self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme) and self.name == other.name and", "def __init__(self, path): self.shared = False; self.container = ''; self.path = path_helper(path, '');", "% scheme_file_path); else: # skipping the known management file if scheme_file != 
'xcschememanagement.plist':", "from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from", "None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns", "def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents != None; def actionLookup(self, action_name):", "class xcscheme(object): def __init__(self, path): self.shared = False; self.container = ''; self.path =", "os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def", "import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import", "'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path):", "AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns the 'archive'", "os import sys import xml.etree.ElementTree as xml from ..Helpers import path_helper from ..Helpers", "and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents !=", "if self.isValid(): return '(%s : %s : %s)' % (type(self), self.name, self.path); else:", "self.archiveAction }; action = None; if action_name in lookup.keys(): action = lookup[action_name]; return", "action = None; if self.isValid(): action = filter(lambda action: action.tag == 
action_type, list(self.contents.getroot()))[0];", "logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path); return schemes; class xcscheme(object): def", "return '(%s : INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path); def", "scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid()", "action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze':", "ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns the 'analyze' action for this", "self.analyzeAction, 'archive': self.archiveAction }; action = None; if action_name in lookup.keys(): action =", "= None; if action_name in lookup.keys(): action = lookup[action_name]; return action; def getAction(self,", "None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns", "return '(%s : %s : %s)' % (type(self), self.name, self.path); else: return '(%s", "return (self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme) and self.name == other.name", "type, None otherwise. 
\"\"\" action_name = action_name.lower(); lookup = { 'build': self.buildAction, 'test':", "return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True: for", "import path_helper from ..Helpers import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import", "return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = [];", "'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\"", "= None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\"", "the 'test' action for this scheme. \"\"\" action = None; if self.isValid(): action", "This method returns all the object for the passed action type, otherwise None.", "self.container = ''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None;", "True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' % scheme_file_path); else:", "file at path \"%s\"' % scheme_file_path); else: # skipping the known management file", "self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None;", "sys import xml.etree.ElementTree as xml from ..Helpers import path_helper from ..Helpers import xcrun_helper", "other.name and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents", "import TestAction from .XCSchemeActions.LaunchAction import LaunchAction 
from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import", "the 'build' action for this scheme. \"\"\" action = None; if self.isValid(): action", "scheme. \"\"\" action = None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction'))", "XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path", "\"\"\" Returns the 'archive' action for this scheme. \"\"\" action = None; if", "lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction,", "= BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns the 'launch' action for", "import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def", "scheme. \"\"\" action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def", "self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents =", "Returns the 'test' action for this scheme. \"\"\" action = None; if self.isValid():", "path \"%s\"' % scheme_file_path); else: # skipping the known management file if scheme_file", "xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!'); def __repr__(self): if", "'(%s : INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self,", "isValid(self): return self.contents != None; def actionLookup(self, action_name): \"\"\" This method returns the", "container): \"\"\" Returns the 'profile' action for this scheme. 
\"\"\" action = None;", "TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns the 'launch'", "XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path,", "as xml from ..Helpers import path_helper from ..Helpers import xcrun_helper from ..Helpers import", "from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def", "= TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns the", "import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import", "schemes; class xcscheme(object): def __init__(self, path): self.shared = False; self.container = ''; self.path", "action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns the 'build' action for", "def analyzeAction(self, container): \"\"\" Returns the 'analyze' action for this scheme. \"\"\" action", "'profile' action for this scheme. 
\"\"\" action = None; if self.isValid(): action =", "ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path,", "scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml);", "action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container):", "os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if", "= None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def", "= lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This method returns all the", "xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path);", "= path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path);", "% (type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme)", "the passed action type, None otherwise. 
\"\"\" action_name = action_name.lower(); lookup = {", "= BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns the 'archive' action for", "..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction", "# skipping the known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is", "return action; def profileAction(self, container): \"\"\" Returns the 'profile' action for this scheme.", "None; if action_name in lookup.keys(): action = lookup[action_name]; return action; def getAction(self, action_type):", "xml.etree.ElementTree as xml from ..Helpers import path_helper from ..Helpers import xcrun_helper from ..Helpers", "os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if", "if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' % scheme_file_path);", ".XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction", "os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def", "the 'launch' action for this scheme. \"\"\" action = None; if self.isValid(): action", "self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns the 'test'", "for this scheme. 
\"\"\" action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction')); action.root", "True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and", "self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could", ".XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction", "method returns all the object for the passed action type, otherwise None. \"\"\"", "management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!'", "os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True: for scheme_file", "return action; def getAction(self, action_type): \"\"\" This method returns all the object for", "self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents != None;", "Returns the 'analyze' action for this scheme. \"\"\" action = None; if self.isValid():", "the 'profile' action for this scheme. \"\"\" action = None; if self.isValid(): action", "object for the passed action type, otherwise None. 
\"\"\" action = None; if", "== other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents != None; def", "self.name, self.path); else: return '(%s : INVALID OBJECT)' % (type(self)); def __attrs(self): return", "self.path); else: return '(%s : INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name,", "os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load", "\"\"\" action = None; if self.isValid(): action = filter(lambda action: action.tag == action_type,", "= action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction,", "return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path; def __hash__(self):", ": INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self, other):", "= None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def", "and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else:", "scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path); return schemes; class", "hash(self.__attrs()); def isValid(self): return self.contents != None; def actionLookup(self, action_name): \"\"\" This method", "def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) ==", "logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' 
% scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path", "= os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path);", "from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from", "XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True:", "exist!' % dir_path); return schemes; class xcscheme(object): def __init__(self, path): self.shared = False;", "logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' % scheme_file_path); else: # skipping the", "\"%s\"' % scheme_file_path); else: # skipping the known management file if scheme_file !=", "% (type(self), self.name, self.path); else: return '(%s : INVALID OBJECT)' % (type(self)); def", "if self.isValid(): action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def", "\"\"\" Returns the 'launch' action for this scheme. \"\"\" action = None; if", "self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\"", "AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return", "self.isValid(): return '(%s : %s : %s)' % (type(self), self.name, self.path); else: return", "= filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\"", "passed action type, otherwise None. 
\"\"\" action = None; if self.isValid(): action =", "\"\"\" action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction')); action.root = BuildAction(self.getAction('BuildAction')) return", "container): \"\"\" Returns the 'launch' action for this scheme. \"\"\" action = None;", "action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns", "schemes = []; if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path =", "path_helper from ..Helpers import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction", "return hash(self.__attrs()); def isValid(self): return self.contents != None; def actionLookup(self, action_name): \"\"\" This", "return schemes; class xcscheme(object): def __init__(self, path): self.shared = False; self.container = '';", "for the passed action type, None otherwise. \"\"\" action_name = action_name.lower(); lookup =", "container): \"\"\" Returns the 'analyze' action for this scheme. 
\"\"\" action = None;", "getAction(self, action_type): \"\"\" This method returns all the object for the passed action", "XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes =", "__repr__(self): if self.isValid(): return '(%s : %s : %s)' % (type(self), self.name, self.path);", "xcscheme(object): def __init__(self, path): self.shared = False; self.container = ''; self.path = path_helper(path,", "if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container):", "= ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns the 'analyze' action for", "if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"'", "'test' action for this scheme. \"\"\" action = None; if self.isValid(): action =", "ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path):", "action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container):", "for this scheme. \"\"\" action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return", "not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True:", "def archiveAction(self, container): \"\"\" Returns the 'archive' action for this scheme. \"\"\" action", "this scheme. 
\"\"\" action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action;", "for this scheme. \"\"\" action = None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root", "return action; def archiveAction(self, container): \"\"\" Returns the 'archive' action for this scheme.", "scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and", ".XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata'));", "if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns the", "self.contents != None; def actionLookup(self, action_name): \"\"\" This method returns the method for", "for this scheme. \"\"\" action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root", "list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns the 'build' action for this", "..Helpers import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction", "return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/');", "xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s : %s : %s)' %", "def profileAction(self, container): \"\"\" Returns the 'profile' action for this scheme. 
\"\"\" action", "= None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\"", "if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme') and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() ==", "the known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an", "the object for the passed action type, otherwise None. \"\"\" action = None;", "this scheme. \"\"\" action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction')); action.root =", "== True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.')", "'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None; if action_name in lookup.keys(): action", "= ''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try:", "Returns the 'build' action for this scheme. \"\"\" action = None; if self.isValid():", "\"\"\" action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self,", "dir_path); return schemes; class xcscheme(object): def __init__(self, path): self.shared = False; self.container =", "LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction", "!= 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' 
% scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]:", "isinstance(other, xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path; def __hash__(self): return", "= None; if self.isValid(): action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return", "returns all the object for the passed action type, otherwise None. \"\"\" action", "action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns the 'analyze' action", "from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata'));", "\"\"\" action_name = action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction,", "\"\"\" Returns the 'build' action for this scheme. \"\"\" action = None; if", "from ..Helpers import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from", "in lookup.keys(): action = lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This method", "action for this scheme. 
\"\"\" action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction'));", "None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns", "= None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\"", "\"\"\" action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self,", "__attrs(self): return (self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme) and self.name ==", "import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import", "== action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns the 'build' action", "Returns the 'launch' action for this scheme. \"\"\" action = None; if self.isValid():", "def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return", "None. \"\"\" action = None; if self.isValid(): action = filter(lambda action: action.tag ==", "at path \"%s\"' % scheme_file_path); else: # skipping the known management file if", "not an xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!'", "action; def getAction(self, action_type): \"\"\" This method returns all the object for the", "is not an xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not", "except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!'); def __repr__(self): if self.isValid():", "does not exist!' 
% dir_path); return schemes; class xcscheme(object): def __init__(self, path): self.shared", "= BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns the 'test' action for", "action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns the 'archive' action", "def getAction(self, action_type): \"\"\" This method returns all the object for the passed", "if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if", "scheme file at path \"%s\"' % scheme_file_path); else: # skipping the known management", "LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns the 'profile' action for this", "(type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self, other): return isinstance(other, xcscheme) and", "Could not load contents of xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s", ": %s : %s)' % (type(self), self.name, self.path); else: return '(%s : INVALID", ".XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def", "self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents", "scheme. 
\"\"\" action = None; if self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def", "BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns the 'test' action for this", "else: return '(%s : INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path);", "[]; if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file);", "action type, otherwise None. \"\"\" action = None; if self.isValid(): action = filter(lambda", "}; action = None; if action_name in lookup.keys(): action = lookup[action_name]; return action;", "BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction", "..Helpers import path_helper from ..Helpers import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction", "launchAction(self, container): \"\"\" Returns the 'launch' action for this scheme. \"\"\" action =", "action = None; if action_name in lookup.keys(): action = lookup[action_name]; return action; def", "self.isValid(): action = ProfileAction(self.getAction('ProfileAction')); return action; def analyzeAction(self, container): \"\"\" Returns the 'analyze'", "BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns the 'archive' action for this", "== other.name and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return", "action_name in lookup.keys(): action = lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This", "action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns the 'profile' action", "container): \"\"\" Returns the 'build' action for this scheme. 
\"\"\" action = None;", "for this scheme. \"\"\" action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return", "method for the passed action type, None otherwise. \"\"\" action_name = action_name.lower(); lookup", "action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns the 'launch' action", "None; def actionLookup(self, action_name): \"\"\" This method returns the method for the passed", "return action; def testAction(self, container): \"\"\" Returns the 'test' action for this scheme.", "schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' % scheme_file_path); else: #", "= { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive':", "'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path):", "action = lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This method returns all", "xml from ..Helpers import path_helper from ..Helpers import xcrun_helper from ..Helpers import logging_helper", "container): \"\"\" Returns the 'test' action for this scheme. \"\"\" action = None;", "action type, None otherwise. 
\"\"\" action_name = action_name.lower(); lookup = { 'build': self.buildAction,", "== True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' % scheme_file_path);", "= xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!'); def __repr__(self):", "and os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid", "scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' %", ": %s)' % (type(self), self.name, self.path); else: return '(%s : INVALID OBJECT)' %", "\"\"\" This method returns all the object for the passed action type, otherwise", "'build' action for this scheme. \"\"\" action = None; if self.isValid(): action =", "else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path); return schemes; class xcscheme(object):", "'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction };", "Returns the 'archive' action for this scheme. \"\"\" action = None; if self.isValid():", "action for this scheme. \"\"\" action = None; if self.isValid(): action = TestAction(self.getAction('TestAction'));", "action; def archiveAction(self, container): \"\"\" Returns the 'archive' action for this scheme. \"\"\"", "def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path):", "action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\" Returns", "returns the method for the passed action type, None otherwise. 
\"\"\" action_name =", "if action_name in lookup.keys(): action = lookup[action_name]; return action; def getAction(self, action_type): \"\"\"", "xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs());", "'launch' action for this scheme. \"\"\" action = None; if self.isValid(): action =", "action; def testAction(self, container): \"\"\" Returns the 'test' action for this scheme. \"\"\"", "= False; self.container = ''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents", "import sys import xml.etree.ElementTree as xml from ..Helpers import path_helper from ..Helpers import", "logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction", "else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at path \"%s\"' % scheme_file_path); else: # skipping", "action for this scheme. \"\"\" action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction'));", "Invalid scheme file at path \"%s\"' % scheme_file_path); else: # skipping the known", "scheme. \"\"\" action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction'))", "= os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not", "False; self.container = ''; self.path = path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents =", "method returns the method for the passed action type, None otherwise. 
\"\"\" action_name", "if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action; def testAction(self, container): \"\"\" Returns the", "action_name = action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile':", "scheme. \"\"\" action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction')); action.root = BuildAction(self.getAction('BuildAction'))", "{ 'build': self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction", "None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self,", "'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path) == True: for scheme_file in", "scheme. \"\"\" action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def", "INVALID OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self, other): return", "= xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme file at", "action_name): \"\"\" This method returns the method for the passed action type, None", "this scheme. 
\"\"\" action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root =", "= AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def archiveAction(self, container): \"\"\" Returns the", "from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction from .XCSchemeActions.LaunchAction import LaunchAction from", "scheme_file_path); else: # skipping the known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]:", "__init__(self, path): self.shared = False; self.container = ''; self.path = path_helper(path, ''); self.name", "lookup.keys(): action = lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This method returns", "path_helper(path, ''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except:", "return action; def launchAction(self, container): \"\"\" Returns the 'launch' action for this scheme.", "load contents of xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s : %s", "logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!'); def __repr__(self): if self.isValid(): return", ".XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction", "self.name == other.name and self.path.root_path == other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self):", "file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme file!' 
%", "lookup[action_name]; return action; def getAction(self, action_type): \"\"\" This method returns all the object", "'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None; if action_name in", "otherwise. \"\"\" action_name = action_name.lower(); lookup = { 'build': self.buildAction, 'test': self.testAction, 'launch':", "\"\"\" Returns the 'profile' action for this scheme. \"\"\" action = None; if", "self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!'); def", "__eq__(self, other): return isinstance(other, xcscheme) and self.name == other.name and self.path.root_path == other.path.root_path;", "% dir_path); return schemes; class xcscheme(object): def __init__(self, path): self.shared = False; self.container", "this scheme. \"\"\" action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action;", "self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns the 'profile'", "def buildAction(self, container): \"\"\" Returns the 'build' action for this scheme. 
\"\"\" action", "''); self.name = os.path.basename(path).split('.xcscheme')[0]; self.contents = None; try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]:", "self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action; def launchAction(self, container): \"\"\"", "XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path,", "else: # skipping the known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\"", "= []; if os.path.exists(dir_path) == True: for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path,", "an xcscheme file!' % scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' 
%", "return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path, 'xcuserdata')); def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes');", ".XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path):", "(type(self), self.name, self.path); else: return '(%s : INVALID OBJECT)' % (type(self)); def __attrs(self):", "known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not an xcscheme", "skipping the known management file if scheme_file != 'xcschememanagement.plist': logging_helper.getLogger().warn('[xcscheme]: \"%s\" is not", "import xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import", "this scheme. \"\"\" action = None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root =", "other.path.root_path; def __hash__(self): return hash(self.__attrs()); def isValid(self): return self.contents != None; def actionLookup(self,", "def launchAction(self, container): \"\"\" Returns the 'launch' action for this scheme. \"\"\" action", "all the object for the passed action type, otherwise None. \"\"\" action =", "\"%s\" path does not exist!' 
% dir_path); return schemes; class xcscheme(object): def __init__(self,", "%s : %s)' % (type(self), self.name, self.path); else: return '(%s : INVALID OBJECT)'", "'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes = []; if os.path.exists(dir_path)", "action_type): \"\"\" This method returns all the object for the passed action type,", "\"\"\" This method returns the method for the passed action type, None otherwise.", "testAction(self, container): \"\"\" Returns the 'test' action for this scheme. \"\"\" action =", "def __repr__(self): if self.isValid(): return '(%s : %s : %s)' % (type(self), self.name,", "try: self.contents = xml.parse(self.path.obj_path); except: logging_helper.getLogger().error('[xcscheme]: Could not load contents of xcscheme file!');", "for the passed action type, otherwise None. \"\"\" action = None; if self.isValid():", "self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None; if action_name in lookup.keys():", "action for this scheme. \"\"\" action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction'));", "return action; def buildAction(self, container): \"\"\" Returns the 'build' action for this scheme.", "\"\"\" action = None; if self.isValid(): action = AnalyzeAction(self.getAction('AnalyzeAction')); action.root = BuildAction(self.getAction('BuildAction')) return", "% scheme_file_path); else: logging_helper.getLogger().warn('[xcscheme]: \"%s\" path does not exist!' % dir_path); return schemes;", "'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action =", "\"\"\" Returns the 'test' action for this scheme. 
\"\"\" action = None; if", "'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None; if", "action = None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return action;", "action = filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container):", "for scheme_file in os.listdir(dir_path): scheme_file_path = os.path.join(dir_path, scheme_file); if not scheme_file.startswith('.') and scheme_file_path.endswith('.xcscheme')", "this scheme. \"\"\" action = None; if self.isValid(): action = BuildAction(self.getAction('BuildAction')); return action;", "\"\"\" action = None; if self.isValid(): action = TestAction(self.getAction('TestAction')); action.root = BuildAction(self.getAction('BuildAction')) return", "from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return os.path.exists(os.path.join(path, 'xcshareddata')); def XCSchemeHasUserSchemes(path): return os.path.exists(os.path.join(path,", "action = None; if self.isValid(): action = ArchiveAction(self.getAction('ArchiveAction')); action.root = BuildAction(self.getAction('BuildAction')) return action;", "action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container):", "not load contents of xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s :", "for this scheme. \"\"\" action = None; if self.isValid(): action = LaunchAction(self.getAction('LaunchAction')); return", "the 'archive' action for this scheme. 
\"\"\" action = None; if self.isValid(): action", "OBJECT)' % (type(self)); def __attrs(self): return (self.name, self.path); def __eq__(self, other): return isinstance(other,", "self.buildAction, 'test': self.testAction, 'launch': self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action", "return action; def analyzeAction(self, container): \"\"\" Returns the 'analyze' action for this scheme.", "self.launchAction, 'profile': self.profileAction, 'analyze': self.analyzeAction, 'archive': self.archiveAction }; action = None; if action_name", "filter(lambda action: action.tag == action_type, list(self.contents.getroot()))[0]; return action; def buildAction(self, container): \"\"\" Returns", "def XCSchemeGetSharedPath(path): return os.path.join(path, 'xcshareddata/xcschemes'); def XCSchemeGetUserPath(path): return os.path.join(path, 'xcuserdata/'+os.getlogin()+'.xcuserdatad/xcschemes/'); def XCSchemeParseDirectory(dir_path): schemes", "'archive' action for this scheme. \"\"\" action = None; if self.isValid(): action =", "from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from", "contents of xcscheme file!'); def __repr__(self): if self.isValid(): return '(%s : %s :", "action; def buildAction(self, container): \"\"\" Returns the 'build' action for this scheme. \"\"\"", "def isValid(self): return self.contents != None; def actionLookup(self, action_name): \"\"\" This method returns", "the method for the passed action type, None otherwise. 
\"\"\" action_name = action_name.lower();", "= LaunchAction(self.getAction('LaunchAction')); return action; def profileAction(self, container): \"\"\" Returns the 'profile' action for", "TestAction from .XCSchemeActions.LaunchAction import LaunchAction from .XCSchemeActions.ProfileAction import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction", "container): \"\"\" Returns the 'archive' action for this scheme. \"\"\" action = None;", "xcrun_helper from ..Helpers import logging_helper from .XCSchemeActions.BuildAction import BuildAction from .XCSchemeActions.TestAction import TestAction", "os.path.isfile(scheme_file_path): scheme_xml = xcscheme(scheme_file_path); if scheme_xml.isValid() == True: schemes.append(scheme_xml); else: logging_helper.getLogger().warn('[xcscheme]: Invalid scheme", "import ProfileAction from .XCSchemeActions.AnalyzeAction import AnalyzeAction from .XCSchemeActions.ArchiveAction import ArchiveAction def XCSchemeHasSharedSchemes(path): return" ]
[ "migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True,", "models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "= [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)),", "migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True,", "dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[", "('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message',", "on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( 
model_name='invitation',", "serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True,", "'0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp',", "), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True,", "models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', 
to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True,", "] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key',", "2017-06-09 18:33 from __future__ import unicode_literals from django.conf import settings from django.db import", "models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True,", "max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation',", "model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "import migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial = True dependencies", "= [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)),", "to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ],", "], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee',", "null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField(", "models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True,", "1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals from django.conf import settings from", "settings from django.db import migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial", "models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[", "import settings from django.db import migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration):", "), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')),", "on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp',", "('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "<filename>itk_invitations/migrations/0001_initial.py # -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-09", "models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ],", "field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inviter', to=settings.AUTH_USER_MODEL),", "__future__ import unicode_literals from django.conf import settings from django.db import migrations, models import", "on 2017-06-09 18:33 from __future__ import unicode_literals from django.conf import settings from django.db", "primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted',", "('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee',", "-*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-09 18:33 from", "models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True,", "to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True,", "model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial = True dependencies = [", "null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)),", "Django 1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals from django.conf import settings", "True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation',", "('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name',", "('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', 
localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ),", "models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)),", "null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit',", "name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message',", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inviter', to=settings.AUTH_USER_MODEL), ),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', 
models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)),", "verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ],", "name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inviter',", "('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id',", "= True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion", "on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='inviter', to=settings.AUTH_USER_MODEL), ), ]", "models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel(", "fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email',", "Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number',", "unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion import", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)),", "('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ),", "('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group',", "localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField(", "models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField(", "models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp',", "max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation',", "to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()),", "verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined',", "('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True,", "), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ),", "import localflavor.us.models class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)),", "serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)),", "models.CharField(blank=True, max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True,", "field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ),", "('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),", "utf-8 -*- # Generated by Django 1.11.1 on 2017-06-09 18:33 from __future__ import", "[ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator',", "('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,", "# Generated by Django 1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals from", "('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True,", "('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee',", "related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id',", "], ), migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ],", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 
('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp',", "name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True, max_length=30)),", "from django.conf import settings from django.db import migrations, models import django.db.models.deletion import localflavor.us.models", "('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "18:33 from __future__ import unicode_literals from django.conf import settings from django.db import migrations,", "class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models", "max_length=30)), ('email', models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True,", "migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ 
('id',", "('response_message', models.TextField(blank=True)), ('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')),", "primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel(", "Generated by Django 1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals from django.conf", "primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "models.EmailField(max_length=254)), ('phone_number', localflavor.us.models.PhoneNumberField(blank=True, max_length=20)), ], ), migrations.AddField( model_name='invitation', name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'),", "models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)), ('response_message',", "django.db.models.deletion import localflavor.us.models class 
Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'),", "('confirmed_invitee', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ),", "migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation',", "on_delete=django.db.models.deletion.SET_NULL, related_name='confirmed_invitee', to=settings.AUTH_USER_MODEL)), ('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='auth.Group')), ], ), migrations.CreateModel( name='Invitation_Visit', fields=[", "# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-09 18:33", "initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name', models.CharField(blank=True,", "migrations.CreateModel( name='InvitationMessage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel(", "), migrations.CreateModel( name='Invitation_Visit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', 
models.DateTimeField(blank=True)),", "[ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True,", "by Django 1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals from django.conf import", "import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial = True dependencies = [ ('auth',", "), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_name', models.CharField(blank=True, max_length=30)), ('last_name',", "django.db import migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial = True", "serialize=False, verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage',", "-*- # Generated by Django 1.11.1 on 2017-06-09 18:33 from __future__ import unicode_literals", "coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-06-09 18:33 from __future__", "to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser'), ), migrations.AddField( model_name='invitation', name='inviter',", "unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', 
models.BooleanField(default=False)), ('response_message', models.TextField(blank=True)),", "migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial = True dependencies =", "django.conf import settings from django.db import migrations, models import django.db.models.deletion import localflavor.us.models class", "from django.db import migrations, models import django.db.models.deletion import localflavor.us.models class Migration(migrations.Migration): initial =", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True,", "verbose_name='ID')), ('first_action_timestamp', models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ), migrations.CreateModel( name='InvitationMessage', fields=[", "operations = [ migrations.CreateModel( name='Invitation', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key', models.CharField(max_length=64,", "verbose_name='ID')), ('message', models.TextField()), ], ), migrations.CreateModel( name='UnregisteredUser', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('key', models.CharField(max_length=64, unique=True)), ('is_coordinator', models.BooleanField(default=False)), ('sent_timestamp', models.DateTimeField(blank=True)), ('response_timestamp', models.DateTimeField(blank=True)), ('is_accepted', models.BooleanField(default=False)), ('is_declined', models.BooleanField(default=False)),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('first_action_timestamp', 
models.DateTimeField(blank=True)), ('last_action_timestamp', models.DateTimeField(blank=True)), ('invitation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='itk_invitations.Invitation')), ], ),", "localflavor.us.models class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0008_alter_user_username_max_length'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "name='invitation_message', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.InvitationMessage'), ), migrations.AddField( model_name='invitation', name='invitee', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='itk_invitations.UnregisteredUser')," ]
[]
[ "node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output = sess.run([node1,", "tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 =", "node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output", "* node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output =", "sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in", "Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output = sess.run([node1, node2]) print(output) #", "Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2", "= sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session() as sess: output =", "as tf node1 = tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(),", "= tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3", "shape=(), dtype=float32) with tf.Session() as sess: output = sess.run([node1, node2]) print(output) # [3.0,", "dtype=float32) with tf.Session() as sess: output = sess.run([node1, node2]) print(output) # [3.0, 4.0]", "print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output = sess.run([node1, node2])", "tf.Session() as sess: output = sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session()", "[3.0, 4.0] with tf.Session() as sess: output = sess.run(node3) print(output) # 12 file_writer", "node3 = node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as", "# Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess: output = sess.run([node1, node2]) 
print(output)", "import tensorflow as tf node1 = tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) #", "# [3.0, 4.0] with tf.Session() as sess: output = sess.run(node3) print(output) # 12", "with tf.Session() as sess: output = sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph',", "= tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command line to show the", "# 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command line", "tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(),", "node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32)", "dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2 print(node3) #", "print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1", "print(output) # [3.0, 4.0] with tf.Session() as sess: output = sess.run(node3) print(output) #", "as sess: output = sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session() as", "tf.Session() as sess: output = sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph)", "file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command line to show", "tf node1 = tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32)", "= tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) #", "shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2 print(node3)", "tensorflow as tf node1 = tf.constant(3.0, tf.float32) 
node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\",", "# Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 *", "sess: output = sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session() as sess:", "print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command", "tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command line to show the result", "dtype=float32) node3 = node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session()", "4.0] with tf.Session() as sess: output = sess.run(node3) print(output) # 12 file_writer =", "= sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"`", "= node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with tf.Session() as sess:", "node2]) print(output) # [3.0, 4.0] with tf.Session() as sess: output = sess.run(node3) print(output)", "as sess: output = sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) #", "output = sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard", "print(node2) # Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2 print(node3) # Tensor(\"mul:0\",", "shape=(), dtype=float32) node3 = node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32) with", "with tf.Session() as sess: output = sess.run([node1, node2]) print(output) # [3.0, 4.0] with", "sess: output = sess.run(node3) print(output) # 12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run", "Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(), dtype=float32)", "sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session() as sess: output = 
sess.run(node3)", "12 file_writer = tf.summary.FileWriter('graph', sess.graph) # run `tensorboard --logdir=\"graph\"` in command line to", "tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2) # Tensor(\"Const_1:0\",", "node1 = tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1) # Tensor(\"Const:0\", shape=(), dtype=float32) print(node2)", "# Tensor(\"Const_1:0\", shape=(), dtype=float32) node3 = node1 * node2 print(node3) # Tensor(\"mul:0\", shape=(),", "output = sess.run([node1, node2]) print(output) # [3.0, 4.0] with tf.Session() as sess: output", "<reponame>dandjo/tensorflow-playground import tensorflow as tf node1 = tf.constant(3.0, tf.float32) node2 = tf.constant(4.0) print(node1)" ]
[ "blockData toCompressM[ hashId ] = curMethod if cnt % cpu_n == 0: for", "recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) #", "!= curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId)", "\" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >=", "sv = Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth = subvCount / 10000.0", "_fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId ]", "tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item", "commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for", "_fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT =", "cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth", "subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount)", "(cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"])", "if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) 
res = tableBlock.update(hashId, cData) res2 =", "cnt = cntNext = 0 cntNth = subvCount / 10000.0 / 3 if", "back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count()", "tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod", "res2: upd += 1 toCompress = {} toCompressM = {} if isVerbose: if", "upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? Changes", "ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True)", "= _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId ] = curMethod", "-*- coding: utf8 -*- \"\"\" Special action to recompress all data \"\"\" __author__", "= 1000 offBatch = 0 cnt = cntNext = upd = 0 cpu_n", "offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone, None): cnt += 1 hashId", "isVerbose: if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount)", "hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[ hashId ]", "tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if", "`id` FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), 
maxBatch, offBatch,)) offBatch +=", "0 cnt = cntNext = upd = 0 cpu_n = cpu_count() * 4", "and res2: upd += 1 toCompress = {} toCompressM = {} if isVerbose:", "/ subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush()", "\"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends - blocks", "tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1", "coding: utf8 -*- \"\"\" Special action to recompress all data \"\"\" __author__ =", "= _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress", "cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId,", "wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\")", "@type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT", "1 toCompress = {} toCompressM = {} if isVerbose: if cnt >= cntNext:", "@param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0", "blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"])", "print(\"Something went wrong? 
Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True)", "\"%6.2f%%\" % (cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc)", "] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2", "% prc) sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory()", "= cntNext = upd = 0 cpu_n = cpu_count() * 4 try: toCompress", "commit maxBatch = 1000 offBatch = 0 cnt = cntNext = upd =", "1: cntNth = 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1", "curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if", "if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod =", "1 if isVerbose: if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\"", "tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? 
Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False)", "# For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin()", "sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() _fuse.operations.getManager().setAutocommit(False) tableSubvol.commit() _fuse.operations.getManager().setAutocommit(True) return", "\"\"\" @param options: Commandline options @type options: object @param _fuse: FUSE wrapper @type", "cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? Changes are rolled back!\") return", "% (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends - blocks commit", "sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext += cntNth", "cpu_n = cpu_count() * 4 try: toCompress = {} toCompressM = {} while", "tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData", "if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" % (cnt, upd,)) if hashCount", "cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type options: object @param", "= toCompressM[ hashId ] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res =", "= cntNext = 0 cntNth = subvCount / 10000.0 / 3 if cntNth", "subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() _fuse.operations.getManager().setAutocommit(False)", "in iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"] 
blockItem = tableBlock.get(hashId) hashCT", "0 cpu_n = cpu_count() * 4 try: toCompress = {} toCompressM = {}", "0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[", "'sergey' import sys from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param options:", "(cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False)", "1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose:", "(cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"],", "100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth = 1", "> 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol =", "= curMethod if cnt % cpu_n == 0: for hashId, item in _fuse.compressData(toCompress):", "to recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True)", "tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in", "filesystem and %s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM", "subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if", "cData) res2 = tableHashCT.update(hashId, cMethodId) if res 
and res2: upd += 1 except:", "None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext +=", "maxBatch = 1000 offBatch = 0 cnt = cntNext = upd = 0", "subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >=", "3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() _fuse.operations.getManager().setAutocommit(False) tableSubvol.commit() _fuse.operations.getManager().setAutocommit(True)", "= tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 toCompress = {}", "return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if", "% prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext:", "= {} toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s`", "* 4 try: toCompress = {} toCompressM = {} while cnt < hashCount:", "offBatch = 0 cnt = cntNext = upd = 0 cpu_n = cpu_count()", "cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \"", "_fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate", "sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" % (cnt, upd,)) if", "isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT", "= tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() 
_fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth =", "sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext +=", "= blockData toCompressM[ hashId ] = curMethod if cnt % cpu_n == 0:", "options: Commandline options @type options: object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS", "toCompressM[ hashId ] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId,", "if isVerbose: if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" %", "tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and", "cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch", "cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For", "= 0 cnt = cntNext = upd = 0 cpu_n = cpu_count() *", "hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData", "for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId] if", "_fuse): \"\"\" @param options: Commandline options @type options: object @param _fuse: FUSE wrapper", "cData, cMethod = item curMethod = toCompressM[ hashId ] if cMethod != curMethod:", "blocks.\" % hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100", "_fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId ] = curMethod if", "= 0 cntNth = subvCount / 10000.0 / 3 if cntNth < 1:", "options: 
object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\")", "] = blockData toCompressM[ hashId ] = curMethod if cnt % cpu_n ==", "0 cntNth = subvCount / 10000.0 / 3 if cntNth < 1: cntNth", "blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId ] =", "(cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if", "tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount) cur =", "tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod =", "res and res2: upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if", "dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\")", "1000 offBatch = 0 cnt = cntNext = upd = 0 cpu_n =", "] = curMethod if cnt % cpu_n == 0: for hashId, item in", "!= cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? 
Changes are rolled back!\")", "= 'sergey' import sys from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param", "tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem", "= _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose:", "1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if", "1 # Process Nth blocks and then - commit maxBatch = 1000 offBatch", "cpu_n == 0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod", "hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols)", "res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 except: pass", "%s blocks.\" % (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True)", "= {} if isVerbose: if cnt >= cntNext: cntNext += cntNth prc =", "{} while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\"", "cntNth = 1 # Process Nth blocks and then - commit maxBatch =", "_fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if", "res2: upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed", "cntNext += cntNth prc = \"%6.2f%%\" % (cnt * 100.0 / subvCount /", 
"curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[", "_fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId", "% hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x", "len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId]", "= tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] =", "10000.0 / 3 if cntNth < 1: cntNth = 1 for subvItem in", "hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount) cur", "= \"%6.2f%%\" % (cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" %", "import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth = subvCount", "subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True)", "def do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type options: object @param _fuse:", "{} toCompressM = {} if isVerbose: if cnt >= cntNext: cntNext += cntNth", "+= cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() #", "= 0 cpu_n = cpu_count() * 4 try: toCompress = {} toCompressM =", "`%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = 
Subvolume(_fuse.operations)", "(4x symbols) cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth = 1 #", "= _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount =", "%s blocks, recompressed %s blocks.\" % (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False)", "_fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count()", "in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[ hashId ] if cMethod", "tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress):", "= 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose:", "isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" % (cnt,", "+= 1 if isVerbose: if cnt >= cntNext: cntNext += cntNth prc =", "iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT =", "* 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True)", "upd += 1 toCompress = {} toCompressM = {} if isVerbose: if cnt", "all data \"\"\" __author__ = 'sergey' import sys from multiprocessing import cpu_count def", "if cntNth < 1: cntNth = 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"])", "hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId] if cMethod", "pass if isVerbose: 
sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\"", "if cntNth < 1: cntNth = 1 # Process Nth blocks and then", "True) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext += cntNth", "% (cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush()", "offBatch += maxBatch for hashItem in iter(cur.fetchone, None): cnt += 1 hashId =", "FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv =", "curMethod if cnt % cpu_n == 0: for hashId, item in _fuse.compressData(toCompress): cData,", "For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin()", "isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock =", "if res and res2: upd += 1 toCompress = {} toCompressM = {}", "_fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount)", "/ 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() _fuse.operations.getManager().setAutocommit(False) tableSubvol.commit()", "= _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\" %", "recompress all data \"\"\" __author__ = 'sergey' import sys from multiprocessing import cpu_count", "item curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res =", "\"\"\" Special action to recompress all data \"\"\" __author__ = 'sergey' import sys", 
"import sys from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline", "cpu_count() * 4 try: toCompress = {} toCompressM = {} while cnt <", "then - commit maxBatch = 1000 offBatch = 0 cnt = cntNext =", "100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt +=", "import cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type options: object", "= tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount) cur", "toCompress = {} toCompressM = {} if isVerbose: if cnt >= cntNext: cntNext", "if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? Changes are", "3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose:", "blocks and then - commit maxBatch = 1000 offBatch = 0 cnt =", "try: toCompress = {} toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT `id`", "< hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch,", "tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True)", "cMethod = item curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod)", "+= cntNth prc = \"%6.2f%%\" % (cnt * 100.0 / subvCount / 3)", "1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"])", "isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" % (cnt, upd,)) if hashCount !=", "print(\"Ready to recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True) 
_fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin()", "@param options: Commandline options @type options: object @param _fuse: FUSE wrapper @type _fuse:", "+= maxBatch for hashItem in iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"]", "item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId] if cMethod !=", "item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[ hashId ] if", "data \"\"\" __author__ = 'sergey' import sys from multiprocessing import cpu_count def do_recompress(options,", "prc = \"%6.2f%%\" % (cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \"", "@type options: object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose =", "* 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose:", "cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth = 1 # Process Nth", "= tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 except: pass if", "= tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount) cur =", "toCompress = {} toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT `id` FROM", "cMethodId) if res and res2: upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\")", "upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s", "% tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt", "in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[hashId] if cMethod != curMethod:", "* FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from 
dedupsqlfs.fuse.subvolume import Subvolume sv", "hashId ] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData)", "toCompressM = {} if isVerbose: if cnt >= cntNext: cntNext += cntNth prc", "cntNext = upd = 0 cpu_n = cpu_count() * 4 try: toCompress =", "%s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone, None):", "subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1", "__author__ = 'sergey' import sys from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\"", "# -*- coding: utf8 -*- \"\"\" Special action to recompress all data \"\"\"", "tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData,", "+= 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks,", "while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\" %", "cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from", "= cpu_count() * 4 try: toCompress = {} toCompressM = {} while cnt", "cntNext = 0 cntNth = subvCount / 10000.0 / 3 if cntNth <", "_fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s", "/ subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt +=", "cntNth < 1: cntNth = 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt", "% 
subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin()", "Special action to recompress all data \"\"\" __author__ = 'sergey' import sys from", "tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0)", ">= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt * 100.0 /", "blocks.\" % (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something", "from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type", "hashId ] = blockData toCompressM[ hashId ] = curMethod if cnt % cpu_n", "_fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? 
Changes are rolled back!\") return 1", "= {} toCompressM = {} if isVerbose: if cnt >= cntNext: cntNext +=", "= upd = 0 cpu_n = cpu_count() * 4 try: toCompress = {}", "_fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res and res2:", "\" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() _fuse.operations.getManager().setAutocommit(False) tableSubvol.commit() _fuse.operations.getManager().setAutocommit(True) return 0", "object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") >", "multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type options:", "cnt = cntNext = upd = 0 cpu_n = cpu_count() * 4 try:", "tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes", "and then - commit maxBatch = 1000 offBatch = 0 cnt = cntNext", "res and res2: upd += 1 toCompress = {} toCompressM = {} if", "and res2: upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose:", "subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName())", "isVerbose: if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt", "- commit maxBatch = 1000 offBatch = 0 cnt = cntNext = upd", "cnt += 1 if isVerbose: if cnt >= cntNext: cntNext += cntNth prc", "/ 10000.0 / 3 if cntNth < 1: cntNth = 1 for subvItem", "hashId ] = curMethod if cnt % cpu_n == 0: for hashId, item", "Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth =", "hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = 
_fuse.decompressData(curMethod,", "= toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData)", "tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit()", "sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory()", "= _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId", "are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount", "except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s", "hashItem in iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId)", "if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True)", "_fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash", "= tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd +=", "curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId,", "Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() 
_fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory()", "int(hashCount/10000.0) if cntNth < 1: cntNth = 1 # Process Nth blocks and", "1: cntNth = 1 # Process Nth blocks and then - commit maxBatch", "sys from multiprocessing import cpu_count def do_recompress(options, _fuse): \"\"\" @param options: Commandline options", "% (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went", "= subvCount / 10000.0 / 3 if cntNth < 1: cntNth = 1", "print(\"Recalculate filesystem and %s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT *", "_fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit()", "{} toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT", "symbols) cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth = 1 # Process", "tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to", "maxBatch, offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone, None): cnt += 1", "tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId", "options @type options: object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose", "subvCount / 10000.0 / 3 if cntNth < 1: cntNth = 1 for", "cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc)", "tableSubvol = 
_fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress %s blocks.\"", "cnt += 1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod", "cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush()", "tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt =", "for hashItem in iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"] blockItem =", "/ 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if", "tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext =", "prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if cnt >= cntNext:", "upd = 0 cpu_n = cpu_count() * 4 try: toCompress = {} toCompressM", "isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False) tableBlock.begin()", "cntNth = subvCount / 10000.0 / 3 if cntNth < 1: cntNth =", "-*- \"\"\" Special action to recompress all data \"\"\" __author__ = 'sergey' import", "blockItem[\"data\"]) toCompress[ hashId ] = blockData toCompressM[ hashId ] = curMethod if cnt", "sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt", "< 1: cntNth = 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt +=", "_fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() 
tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId,", "_fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready to recompress %s", "for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[ hashId", "- blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if", "0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\")", "None): cnt += 1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId)", "tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import", "sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" % (cnt, upd,))", "sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if", "(cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong?", "tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item", 
"_fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext = 0", "toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2", "tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 except: pass if isVerbose:", "\"\"\" __author__ = 'sergey' import sys from multiprocessing import cpu_count def do_recompress(options, _fuse):", ">= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" %", "/ 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose:", "to recompress all data \"\"\" __author__ = 'sergey' import sys from multiprocessing import", "do_recompress(options, _fuse): \"\"\" @param options: Commandline options @type options: object @param _fuse: FUSE", "if isVerbose: print(\"Ready to recompress %s blocks.\" % hashCount) cur = tableHash.getCursor(True) _fuse.operations.getManager().setAutocommit(False)", "= 1 # Process Nth blocks and then - commit maxBatch = 1000", "sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext", "%s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" %", "= item curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res", "blocks, recompressed %s blocks.\" % (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback()", "cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume", "%s blocks.\" % hashCount) cur = tableHash.getCursor(True) 
_fuse.operations.getManager().setAutocommit(False) tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every", "sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext +=", "FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch", "action to recompress all data \"\"\" __author__ = 'sergey' import sys from multiprocessing", "Commandline options @type options: object @param _fuse: FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\"", "100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() if isVerbose: sys.stdout.write(\"\\n\")", "3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if", "for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt", "+= 1 hashId = hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod =", "Nth blocks and then - commit maxBatch = 1000 offBatch = 0 cnt", "# Process Nth blocks and then - commit maxBatch = 1000 offBatch =", "cMethod = item curMethod = toCompressM[ hashId ] if cMethod != curMethod: cMethodId", "item curMethod = toCompressM[ hashId ] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod)", "`%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for", "\" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if cnt", "= {} while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET", "cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(),", "+= 1 toCompress = {} toCompressM = {} if isVerbose: if cnt >=", "prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext", "print(\"Processed %s blocks, 
recompressed %s blocks.\" % (cnt, upd,)) if hashCount != cnt:", "# Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth < 1: cntNth", "curMethod = toCompressM[ hashId ] if cMethod != curMethod: cMethodId = _fuse.operations.getCompressionTypeId(cMethod) res", "= _fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res and", "tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount", "\" % prc) sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit()", "cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt * 100.0 / subvCount", "tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth <", "if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s", "% cpu_n == 0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item", "toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s", "cntNth < 1: cntNth = 1 # Process Nth blocks and then -", "if cnt % cpu_n == 0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod", "prc = \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends", "% (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone, None): cnt", "cData) res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 toCompress", "_fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth < 1:", "tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 toCompress = {} 
toCompressM", "toCompress[ hashId ] = blockData toCompressM[ hashId ] = curMethod if cnt %", "if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt *", "_fuse.compressData(toCompress): cData, cMethod = item curMethod = toCompressM[ hashId ] if cMethod !=", "sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit()", "100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt", "/ subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt += 1", "* 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() sv.get_root_diff(subvItem[\"name\"]) cnt", "maxBatch for hashItem in iter(cur.fetchone, None): cnt += 1 hashId = hashItem[\"id\"] blockItem", "{} if isVerbose: if cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\"", "= Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth = subvCount / 10000.0 /", "and %s subvolumes statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\"", "cntNth prc = \"%6.2f%%\" % (cnt * 100.0 / subvCount / 3) sys.stdout.write(\"\\r%s", "= item curMethod = toCompressM[ hashId ] if cMethod != curMethod: cMethodId =", "res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd", "sv.get_root_diff(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext += cntNth", "%s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem in", "if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed %s blocks.\" %", "hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[ hashId ]", "cMethodId = 
_fuse.operations.getCompressionTypeId(cMethod) res = tableBlock.update(hashId, cData) res2 = tableHashCT.update(hashId, cMethodId) if res", "tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) # Every 100*100 (4x symbols) cntNth = int(hashCount/10000.0) if cntNth", "statistics.\" % subvCount) cur = tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False)", "tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\" %", "cntNth = 1 for subvItem in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if", "= int(hashCount/10000.0) if cntNth < 1: cntNth = 1 # Process Nth blocks", "cnt >= cntNext: cntNext += cntNth prc = \"%6.2f%%\" % (cnt * 100.0", "3 if cntNth < 1: cntNth = 1 for subvItem in iter(cur.fetchone, None):", "dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth =", "recompressed %s blocks.\" % (cnt, upd,)) if hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback()", "= _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\")", "_fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol", "FUSE wrapper @type _fuse: dedupsqlfs.fuse.dedupfs.DedupFS \"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash =", "_fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext", "% prc) 
sys.stdout.flush() sv.get_usage(subvItem[\"name\"], True) cnt += 1 if isVerbose: if cnt >=", "OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone,", "toCompressM[ hashId ] = curMethod if cnt % cpu_n == 0: for hashId,", "= \"%6.2f%%\" % (cnt*100.0/hashCount) sys.stdout.write(\"\\r%s \" % prc) sys.stdout.flush() # For ends -", "rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount =", "if res and res2: upd += 1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush()", "in iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext:", "Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth = subvCount /", "hashCount != cnt: _fuse.operations.getManager().setAutocommit(False) tableBlock.rollback() tableHashCT.rollback() _fuse.operations.getManager().setAutocommit(True) print(\"Something went wrong? 
Changes are rolled", "cData, cMethod = item curMethod = toCompressM[hashId] if cMethod != curMethod: cMethodId =", "tableHash.shrinkMemory() tableHashCT.shrinkMemory() subvCount = tableSubvol.get_count() if isVerbose: print(\"Recalculate filesystem and %s subvolumes statistics.\"", "/ 3 if cntNth < 1: cntNth = 1 for subvItem in iter(cur.fetchone,", "prc) sys.stdout.flush() # For ends - blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory()", "= tableSubvol.getCursor(True) cur.execute(\"SELECT * FROM `%s`\" % tableSubvol.getName()) _fuse.operations.getManager().setAutocommit(False) tableSubvol.begin() _fuse.operations.getManager().setAutocommit(True) from dedupsqlfs.fuse.subvolume", "_fuse.operations.getTable(\"hash_compression_type\") tableBlock = _fuse.operations.getTable(\"block\") tableSubvol = _fuse.operations.getTable(\"subvolume\") hashCount = tableHash.get_count() if isVerbose: print(\"Ready", "= tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData = _fuse.decompressData(curMethod, blockItem[\"data\"]) toCompress[", "Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth = subvCount / 10000.0 / 3", "wrong? 
Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory() tableHash.shrinkMemory()", "== 0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod =", "\"\"\" isVerbose = _fuse.getOption(\"verbosity\") > 0 tableHash = _fuse.operations.getTable(\"hash\") tableHashCT = _fuse.operations.getTable(\"hash_compression_type\") tableBlock", "cnt % cpu_n == 0: for hashId, item in _fuse.compressData(toCompress): cData, cMethod =", "< 1: cntNth = 1 # Process Nth blocks and then - commit", "res2 = tableHashCT.update(hashId, cMethodId) if res and res2: upd += 1 toCompress =", "4 try: toCompress = {} toCompressM = {} while cnt < hashCount: cur.execute(\"SELECT", "LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem", "from dedupsqlfs.fuse.subvolume import Subvolume sv = Subvolume(_fuse.operations) cnt = cntNext = 0 cntNth", "blocks commit _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() tableBlock.shrinkMemory() tableHash.shrinkMemory() tableHashCT.shrinkMemory() tableBlock.begin() tableHashCT.begin() _fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()):", "went wrong? 
Changes are rolled back!\") return 1 _fuse.operations.getManager().setAutocommit(False) tableBlock.commit() tableHashCT.commit() _fuse.operations.getManager().setAutocommit(True) tableBlock.shrinkMemory()", "= hashItem[\"id\"] blockItem = tableBlock.get(hashId) hashCT = tableHashCT.get(hashId) curMethod = _fuse.operations.getCompressionTypeName(hashCT[\"type_id\"]) blockData =", "1 except: pass if isVerbose: sys.stdout.write(\"\\n\") sys.stdout.flush() if isVerbose: print(\"Processed %s blocks, recompressed", "_fuse.operations.getManager().setAutocommit(True) if len(toCompress.keys()): for hashId, item in _fuse.compressData(toCompress): cData, cMethod = item curMethod", "Process Nth blocks and then - commit maxBatch = 1000 offBatch = 0", "(tableHash.getName(), maxBatch, offBatch,)) offBatch += maxBatch for hashItem in iter(cur.fetchone, None): cnt +=", "utf8 -*- \"\"\" Special action to recompress all data \"\"\" __author__ = 'sergey'", "iter(cur.fetchone, None): sv.clean_stats(subvItem[\"name\"]) cnt += 1 if isVerbose: if cnt >= cntNext: cntNext", "cMethodId) if res and res2: upd += 1 toCompress = {} toCompressM =", "hashCount: cur.execute(\"SELECT `id` FROM `%s` LIMIT %s OFFSET %s\" % (tableHash.getName(), maxBatch, offBatch,))" ]
[ "if r in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref", "row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for", "return schools, comms def format_for_main(schools, comms): ''' Creates all the objects and fills", "of picks and returns a list of picks that they get. Thanks stack", "index_to_drop = i #no break so I can grab the last value c_to_drop", "[] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns the dataframes '''", "s == s2: print(\"It worked! :)\") else: print(\"There's a bug. Bad computer. :(\")", "order (all committees must be present) picks is the picks we assign to", "for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments)", "== 1: index_to_drop = i #no break so I can grab the last", "= format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams, committees) s = 0", "mapping committee names to Committee objects ''' teams = [] committees = {}", "teams = [] committees = {} max_at_conf = 0 comms.columns = ['Committee', 'Number", "committees): ''' My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and", "assign to make the draft fair assigned committees will be the committees assigned", "and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences: p =", "Bad computer. 
:(\") if __name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went", "[] length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length / num))])", "comm max_at_conf += row['Delegation Size'] for index, row in schools.iterrows(): prefs = [j", "team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number of Delegates'],", "c.name)) break else: continue else: continue return teams, committees def output(teams, committees): '''", "all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number", "= [] committees = {} max_at_conf = 0 comms.columns = ['Committee', 'Number of", "''' picks = [] length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i *", "of possible rounds the number of picks and returns a list of picks", "and it iterates through each round of the draft until either all delegates", "schools.iterrows(): prefs = [j for j in row[2:]] for i in range(ceil(row['Number of", "def __init__(self, name, num_delegates, preferences): ''' num_delegats is an int of the total", "0 comms.columns = ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns = ['School', 'Number", "the teams' constraints/preferences and committees and simulates a draft. Each team got picks", "i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested #", "< team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if", "in order (all committees must be present) picks is the picks we assign", "except: print(\"Something went wrong. 
Please make sure your usage is correct and files", "through each round of the draft until either all delegates are assigned or", "in teams: if r in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences)", "documents. Inputs from assign ''' all_school_assignments = [] all_comm_assignments = [] for team", "print(\"Something went wrong. Please make sure your usage is correct and files are", "enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i #no break so I can", "stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence)) for i in", "= pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True) comm_df =", "Delegates'] - i * max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf,", "the draft fair assigned committees will be the committees assigned to be outputted", "= read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams, committees = assign(teams, committees)", "list, in order (all committees must be present) picks is the picks we", "1:'Number of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index,", "max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return", "number of picks and returns a list of picks that they get. 
Thanks", "returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms", "all the objects and fills in the information from the dataframes inputs: schools,", "= Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams, committees def assign(teams, committees):", "{}\".format(str(i)) for i in range(len(comms))] for index, row in comms.iterrows(): comm = Committee(row['Committee'],", "continue else: continue return teams, committees def output(teams, committees): ''' Outputs the master", "0 def _get_picks(self, sequence, num): ''' Intersperses picks for small delegations. Takes a", "r in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref in", "all committees are filled. Inputs: teams, a list of Team objects from format_for_main", "team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i", "Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for index, row in", "[] for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees:", "num): ''' Intersperses picks for small delegations. 
Takes a list of possible rounds", "val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break else: continue else:", "schools at different times (ew) - Bias towards double delegate committees ''' class", "picks assigned at initialization (first round, fourth round, etc.), and it iterates through", "teams: s += i.num_delegates s2 = 0 for key in committees: s2 +=", "dict of committees (formatted the same) with assignments ''' for r in range(len(committees)):", "if team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1:", "do schools at different times (ew) - Bias towards double delegate committees '''", "row['Delegation Size'] for index, row in schools.iterrows(): prefs = [j for j in", "simulates a draft. Each team got picks assigned at initialization (first round, fourth", "committees (name : Committee object) from format_for_main Outputs: teams, a list of Team", "team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools)", "Size'] for index, row in schools.iterrows(): prefs = [j for j in row[2:]]", "index, row in schools.iterrows(): prefs = [j for j in row[2:]] for i", "a list of Team objects with assignments committees, a dict of committees (formatted", "for single, 2 for double, and so on assigned schools: the schools who", "assignments ''' for r in range(len(committees)): print(\"round {}\".format(r)) for team in teams: if", "Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for index, row", "committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, 
c.name)) break else: continue else: continue", "output(teams, committees): ''' Outputs the master documents. Inputs from assign ''' all_school_assignments =", "= Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation", "= [] for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in", "size: 1 for single, 2 for double, and so on assigned schools: the", "i.num_delegates s2 = 0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s", "committees and simulates a draft. Each team got picks assigned at initialization (first", "fair assigned committees will be the committees assigned to be outputted ''' self.name", "pandas as pd from math import ceil from sys import argv ''' Current", "value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break", "= assign(teams, committees) output(teams, committees) s = 0 for i in teams: s", "teams, committees = assign(teams, committees) output(teams, committees) s = 0 for i in", "_get_picks(self, sequence, num): ''' Intersperses picks for small delegations. Takes a list of", "#no break so I can grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\", "all delegates are assigned or all committees are filled. Inputs: teams, a list", "in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences:", "read_info outputs: teams, a list of Team objects committees, a dict mapping committee", ":(\") if __name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong. 
Please", "maximum number of delegates that can be assigned to that committee delegation size:", "self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns the", "be assigned to that committee delegation size: 1 for single, 2 for double,", "i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i #no break", "http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i", "that committee delegation size: 1 for single, 2 for double, and so on", "algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and simulates a draft.", "a dict mapping committee names to Committee objects ''' teams = [] committees", "range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks class Committee: def __init__(self, name,", "thing. ''' schools, comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams,", "assignment. Takes the teams' constraints/preferences and committees and simulates a draft. Each team", "* length / num))]) return picks class Committee: def __init__(self, name, num_spots, delegation_size):", "darn thing. ''' schools, comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms)", "for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i #no", "''' Outputs the master documents. 
Inputs from assign ''' all_school_assignments = [] all_comm_assignments", "= i #no break so I can grab the last value c_to_drop =", "from math import ceil from sys import argv ''' Current known problems: -", "from sys import argv ''' Current known problems: - do schools at different", "row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole darn thing. ''' schools, comms", "read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns the dataframes ''' schools =", "fills in the information from the dataframes inputs: schools, comms: pandas dataframes from", "= 0 comms.columns = ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns = ['School',", "are filled. Inputs: teams, a list of Team objects from format_for_main committees, a", "\\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for index, row in comms.iterrows(): comm", "= comm max_at_conf += row['Delegation Size'] for index, row in schools.iterrows(): prefs =", "of the draft until either all delegates are assigned or all committees are", "committees, a dict of committees (name : Committee object) from format_for_main Outputs: teams,", "committees will be the committees assigned to be outputted ''' self.name = name", "for i in teams: s += i.num_delegates s2 = 0 for key in", "for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked!", "\\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned >", "picks.append(sequence[int(ceil(i * length / num))]) return picks class Committee: def __init__(self, name, num_spots,", "is correct and files are formatted correctly.\") print(\"Usage: python3 get_positions.py [school_info_filepath] [committee info", "in the information from the 
dataframes inputs: schools, comms: pandas dataframes from read_info", "a spot on the committee ''' self.name = name self.num_spots = num_spots self.delegation_size", "a list of picks that they get. Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks", "it iterates through each round of the draft until either all delegates are", "comms: pandas dataframes from read_info outputs: teams, a list of Team objects committees,", "team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\", "__init__(self, name, num_delegates, preferences): ''' num_delegats is an int of the total number", "1 for single, 2 for double, and so on assigned schools: the schools", "= delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and", "the draft until either all delegates are assigned or all committees are filled.", "of the total number of delegates preferences is the ranked preferences as a", "for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested", "try: go(argv[1], argv[2]) except: print(\"Something went wrong. Please make sure your usage is", "for small delegations. Takes a list of possible rounds the number of picks", "> max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number", "committees ''' class Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats is an", "# handling more delegates requested # than there are committees. 
num_dels = row['Number", "committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School',", "j in row[2:]] for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling", "draft fair assigned committees will be the committees assigned to be outputted '''", "are committees. num_dels = row['Number of Delegates'] - i * max_at_conf if num_dels", "= preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def", "0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It", "who have a spot on the committee ''' self.name = name self.num_spots =", "teams, a list of Team objects committees, a dict mapping committee names to", "inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows():", "list of Team objects from format_for_main committees, a dict of committees (name :", "= True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School']))", "etc.), and it iterates through each round of the draft until either all", "teams' constraints/preferences and committees and simulates a draft. 
Each team got picks assigned", "dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools,", "of delegates preferences is the ranked preferences as a list, in order (all", "make the draft fair assigned committees will be the committees assigned to be", "to that committee delegation size: 1 for single, 2 for double, and so", "last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name))", "committees, a dict mapping committee names to Committee objects ''' teams = []", "so on assigned schools: the schools who have a spot on the committee", "have a spot on the committee ''' self.name = name self.num_spots = num_spots", "of Team objects committees, a dict mapping committee names to Committee objects '''", "be the committees assigned to be outputted ''' self.name = name self.num_delegates =", "committees assigned to be outputted ''' self.name = name self.num_delegates = num_delegates self.preferences", "committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! 
:)\") else: print(\"There's", "list of Team objects with assignments committees, a dict of committees (formatted the", "round of the draft until either all delegates are assigned or all committees", "all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ +", ": Committee object) from format_for_main Outputs: teams, a list of Team objects with", "+ team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df", "the filepaths and returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename)", "* max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else:", "max_at_conf = 0 comms.columns = ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns =", "and fills in the information from the dataframes inputs: schools, comms: pandas dataframes", "else: print(\"There's a bug. Bad computer. :(\") if __name__ == \"__main__\": try: go(argv[1],", "len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref))", "object) from format_for_main Outputs: teams, a list of Team objects with assignments committees,", "+= i.num_delegates s2 = 0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if", "= self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self, sequence, num):", "picks that they get. Thanks stack overflow! 
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length", "delegates that can be assigned to that committee delegation size: 1 for single,", "{}\".format(r)) for team in teams: if r in team.picks and len(team.assigned_committees) < team.num_delegates:", "len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name)", "schools, comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams, committees =", "in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size]", "assigned to be outputted ''' self.name = name self.num_delegates = num_delegates self.preferences =", "delegation size: 1 for single, 2 for double, and so on assigned schools:", "schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): '''", "overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence)) for i in range(num):", "round, fourth round, etc.), and it iterates through each round of the draft", "delegates preferences is the ranked preferences as a list, in order (all committees", "for index, row in schools.iterrows(): prefs = [j for j in row[2:]] for", "of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for index, row", "returns a list of picks that they get. Thanks stack overflow! 
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n '''", "an int of the total number of delegates preferences is the ranked preferences", "['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for", "print(\"round {}\".format(r)) for team in teams: if r in team.picks and len(team.assigned_committees) <", "committee names to Committee objects ''' teams = [] committees = {} max_at_conf", "< team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c", "self.num_dels_assigned = 0 def _get_picks(self, sequence, num): ''' Intersperses picks for small delegations.", "in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns =", "list of picks that they get. Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks =", "the number of picks and returns a list of picks that they get.", "number of delegates preferences is the ranked preferences as a list, in order", "self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self, sequence, num): ''' Intersperses picks", "the same) with assignments ''' for r in range(len(committees)): print(\"round {}\".format(r)) for team", "from the dataframes inputs: schools, comms: pandas dataframes from read_info outputs: teams, a", "committee_filename): ''' Runs the whole darn thing. 
''' schools, comms = read_info(school_filename, committee_filename)", "read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams,", "single, 2 for double, and so on assigned schools: the schools who have", "make sure your usage is correct and files are formatted correctly.\") print(\"Usage: python3", "length / num))]) return picks class Committee: def __init__(self, name, num_spots, delegation_size): '''", "picks is the picks we assign to make the draft fair assigned committees", "team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break else: continue else: continue return teams,", "= {0:'School', 1:'Number of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\")", "from format_for_main Outputs: teams, a list of Team objects with assignments committees, a", "the whole darn thing. ''' schools, comms = read_info(school_filename, committee_filename) teams, committees =", "''' for r in range(len(committees)): print(\"round {}\".format(r)) for team in teams: if r", "preferences as a list, in order (all committees must be present) picks is", "the dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def", "range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested # than there", "teams: if r in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for", "committees. 
num_dels = row['Number of Delegates'] - i * max_at_conf if num_dels >", "committees (formatted the same) with assignments ''' for r in range(len(committees)): print(\"round {}\".format(r))", "for i in range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks class Committee:", "a bug. Bad computer. :(\") if __name__ == \"__main__\": try: go(argv[1], argv[2]) except:", "print(\"It worked! :)\") else: print(\"There's a bug. Bad computer. :(\") if __name__ ==", "s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! :)\") else: print(\"There's a", "Inputs from assign ''' all_school_assignments = [] all_comm_assignments = [] for team in", "pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): ''' Creates all", "Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence)) for i", "team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df =", "of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row", "schools, comms def format_for_main(schools, comms): ''' Creates all the objects and fills in", "delegates are assigned or all committees are filled. Inputs: teams, a list of", "Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for index, row in schools.iterrows():", "i * max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team)", "continue return teams, committees def output(teams, committees): ''' Outputs the master documents. 
Inputs", "preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self,", "row['Number of Delegates'] - i * max_at_conf if num_dels > max_at_conf: team =", "in schools.iterrows(): prefs = [j for j in row[2:]] for i in range(ceil(row['Number", "print(\"assigned {} to {}\".format(team.name, c.name)) break else: continue else: continue return teams, committees", "same) with assignments ''' for r in range(len(committees)): print(\"round {}\".format(r)) for team in", "objects from format_for_main committees, a dict of committees (name : Committee object) from", "total number of delegates preferences is the ranked preferences as a list, in", "committee ''' self.name = name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools =", "teams.append(team) return teams, committees def assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes", "True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def", "Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams, committees def assign(teams, committees): '''", "return picks class Committee: def __init__(self, name, num_spots, delegation_size): ''' name: name of", "Intersperses picks for small delegations. 
Takes a list of possible rounds the number", "comms def format_for_main(schools, comms): ''' Creates all the objects and fills in the", "comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns", "team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned", "all_school_assignments = [] all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name, team.num_delegates] +", "worked! :)\") else: print(\"There's a bug. Bad computer. :(\") if __name__ == \"__main__\":", "a list of possible rounds the number of picks and returns a list", "round, etc.), and it iterates through each round of the draft until either", "argv[2]) except: print(\"Something went wrong. Please make sure your usage is correct and", "committees = format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams, committees) s =", "''' all_school_assignments = [] all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name, team.num_delegates]", "< c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name)", "__init__(self, name, num_spots, delegation_size): ''' name: name of committee num_spots: maximum number of", "committees must be present) picks is the picks we assign to make the", "/ num))]) return picks class Committee: def __init__(self, name, num_spots, delegation_size): ''' name:", "['Committee', 'Number of Spots', 'Delegation Size'] schools.columns = ['School', 'Number of Delegates'] +", "(ew) - Bias towards double delegate committees ''' class Team: def __init__(self, name,", "the objects and fills in the information from the dataframes 
inputs: schools, comms:", "team got picks assigned at initialization (first round, fourth round, etc.), and it", "schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv')", "= pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename):", "Outputs the master documents. Inputs from assign ''' all_school_assignments = [] all_comm_assignments =", "for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole", "on assigned schools: the schools who have a spot on the committee '''", "self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths", "def format_for_main(schools, comms): ''' Creates all the objects and fills in the information", "committees def assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes the teams' constraints/preferences", "for r in range(len(committees)): print(\"round {}\".format(r)) for team in teams: if r in", "and team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned +=", "import argv ''' Current known problems: - do schools at different times (ew)", "Creates all the objects and fills in the information from the dataframes inputs:", "draft. 
Each team got picks assigned at initialization (first round, fourth round, etc.),", "Committee objects ''' teams = [] committees = {} max_at_conf = 0 comms.columns", "class Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats is an int of", "so I can grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop)", "''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms):", "def __init__(self, name, num_spots, delegation_size): ''' name: name of committee num_spots: maximum number", "float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks class", "break so I can grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name))", "[] committees = {} max_at_conf = 0 comms.columns = ['Committee', 'Number of Spots',", "s += i.num_delegates s2 = 0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size", "(formatted the same) with assignments ''' for r in range(len(committees)): print(\"round {}\".format(r)) for", "delegations. Takes a list of possible rounds the number of picks and returns", "prefs) teams.append(team) return teams, committees def assign(teams, committees): ''' My algorithm! Draft-based assignment.", "wrong. 
Please make sure your usage is correct and files are formatted correctly.\")", "assigned schools: the schools who have a spot on the committee ''' self.name", "teams.append(team) else: team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams, committees", "can grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {}", "requested # than there are committees. num_dels = row['Number of Delegates'] - i", "with assignments ''' for r in range(len(committees)): print(\"round {}\".format(r)) for team in teams:", "schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True) comm_df", "comms) teams, committees = assign(teams, committees) output(teams, committees) s = 0 for i", "committees) output(teams, committees) s = 0 for i in teams: s += i.num_delegates", "c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size ==", "must be present) picks is the picks we assign to make the draft", "comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf", "the picks we assign to make the draft fair assigned committees will be", "teams, a list of Team objects with assignments committees, a dict of committees", "> team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop =", "{} max_at_conf = 0 comms.columns = ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns", "of Spots', 'Delegation Size'] schools.columns = ['School', 'Number of Delegates'] + \\ [\"Preference", "pandas dataframes from read_info outputs: teams, a list of Team objects committees, a", "- do schools at 
different times (ew) - Bias towards double delegate committees", "got picks assigned at initialization (first round, fourth round, etc.), and it iterates", "name: name of committee num_spots: maximum number of delegates that can be assigned", "= ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns = ['School', 'Number of Delegates']", "get_positions.py import pandas as pd from math import ceil from sys import argv", "''' class Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats is an int", "def _get_picks(self, sequence, num): ''' Intersperses picks for small delegations. Takes a list", "team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools)", "= name self.num_delegates = num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees", "team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop", "filepaths and returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return", "in teams: s += i.num_delegates s2 = 0 for key in committees: s2", "your usage is correct and files are formatted correctly.\") print(\"Usage: python3 get_positions.py [school_info_filepath]", "Each team got picks assigned at initialization (first round, fourth round, etc.), and", "or all committees are filled. Inputs: teams, a list of Team objects from", "and returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools,", "at different times (ew) - Bias towards double delegate committees ''' class Team:", "bug. Bad computer. 
:(\") if __name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something", "Current known problems: - do schools at different times (ew) - Bias towards", "int of the total number of delegates preferences is the ranked preferences as", "times (ew) - Bias towards double delegate committees ''' class Team: def __init__(self,", "s = 0 for i in teams: s += i.num_delegates s2 = 0", "# get_positions.py import pandas as pd from math import ceil from sys import", "there are committees. num_dels = row['Number of Delegates'] - i * max_at_conf if", "= pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): ''' Creates all the objects", "in range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks class Committee: def __init__(self,", "'Number of Spots', 'Delegation Size'] schools.columns = ['School', 'Number of Delegates'] + \\", "''' My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and simulates", "outputted ''' self.name = name self.num_delegates = num_delegates self.preferences = preferences self.picks =", "self.name = name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools = [] def", "= 0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2:", "committees = {} max_at_conf = 0 comms.columns = ['Committee', 'Number of Spots', 'Delegation", "iterates through each round of the draft until either all delegates are assigned", "inputs: schools, comms: pandas dataframes from read_info outputs: teams, a list of Team", "of committees (name : Committee object) from format_for_main Outputs: teams, a list of", "Team objects with assignments committees, a dict of committees (formatted the same) with", "of Delegates'] / max_at_conf)): # handling more delegates requested # than there are", "each round of the draft until either all delegates are assigned or all", "committees[p] if 
len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1 +", "+= c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees): if committees[val].delegation_size", "def output(teams, committees): ''' Outputs the master documents. Inputs from assign ''' all_school_assignments", "num_delegates, preferences): ''' num_delegats is an int of the total number of delegates", "num_dels = row['Number of Delegates'] - i * max_at_conf if num_dels > max_at_conf:", "and committees and simulates a draft. Each team got picks assigned at initialization", "- Bias towards double delegate committees ''' class Team: def __init__(self, name, num_delegates,", "max_at_conf += row['Delegation Size'] for index, row in schools.iterrows(): prefs = [j for", "picks we assign to make the draft fair assigned committees will be the", "= [] all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees)", "of picks that they get. Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = []", "range(len(comms))] for index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation", "team in teams: if r in team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name,", "in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole darn thing. '''", "else: continue else: continue return teams, committees def output(teams, committees): ''' Outputs the", "the committee ''' self.name = name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools", "max_at_conf)): # handling more delegates requested # than there are committees. 
num_dels =", "[\"Preference {}\".format(str(i)) for i in range(len(comms))] for index, row in comms.iterrows(): comm =", "for j in row[2:]] for i in range(ceil(row['Number of Delegates'] / max_at_conf)): #", "= [] length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length /", "return teams, committees def output(teams, committees): ''' Outputs the master documents. Inputs from", "+= len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! :)\") else: print(\"There's a bug.", "= num_spots self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes", "0 for i in teams: s += i.num_delegates s2 = 0 for key", "if __name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong. Please make", "get. Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence)) for", "comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the", "print(team.name, team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if", "whole darn thing. ''' schools, comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools,", "a dict of committees (name : Committee object) from format_for_main Outputs: teams, a", "number of delegates that can be assigned to that committee delegation size: 1", "committees def output(teams, committees): ''' Outputs the master documents. 
Inputs from assign '''", "for double, and so on assigned schools: the schools who have a spot", "+ committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace =", "output(teams, committees) s = 0 for i in teams: s += i.num_delegates s2", "else: continue return teams, committees def output(teams, committees): ''' Outputs the master documents.", "be outputted ''' self.name = name self.num_delegates = num_delegates self.preferences = preferences self.picks", "row in schools.iterrows(): prefs = [j for j in row[2:]] for i in", "at initialization (first round, fourth round, etc.), and it iterates through each round", "grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to", "= [] self.num_dels_assigned = 0 def _get_picks(self, sequence, num): ''' Intersperses picks for", "num_spots: maximum number of delegates that can be assigned to that committee delegation", "correct and files are formatted correctly.\") print(\"Usage: python3 get_positions.py [school_info_filepath] [committee info filepath]\")", "s2 = 0 for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s ==", "handling more delegates requested # than there are committees. 
num_dels = row['Number of", "objects with assignments committees, a dict of committees (formatted the same) with assignments", "and so on assigned schools: the schools who have a spot on the", "if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size:", "of committee num_spots: maximum number of delegates that can be assigned to that", "pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots", "{0:'School', 1:'Number of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for", "list of Team objects committees, a dict mapping committee names to Committee objects", "c = committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\ -", "I can grab the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned", "with assignments committees, a dict of committees (formatted the same) with assignments '''", "to be outputted ''' self.name = name self.num_delegates = num_delegates self.preferences = preferences", "1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for", "name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename):", "num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned =", "dict mapping committee names to Committee objects ''' teams = [] committees =", "''' num_delegats 
is an int of the total number of delegates preferences is", "constraints/preferences and committees and simulates a draft. Each team got picks assigned at", "Team objects from format_for_main committees, a dict of committees (name : Committee object)", "self.name = name self.num_delegates = num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates)", "and simulates a draft. Each team got picks assigned at initialization (first round,", "num))]) return picks class Committee: def __init__(self, name, num_spots, delegation_size): ''' name: name", "assign ''' all_school_assignments = [] all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name,", "the dataframes inputs: schools, comms: pandas dataframes from read_info outputs: teams, a list", "class Committee: def __init__(self, name, num_spots, delegation_size): ''' name: name of committee num_spots:", "+= row['Delegation Size'] for index, row in schools.iterrows(): prefs = [j for j", "for team in teams: if r in team.picks and len(team.assigned_committees) < team.num_delegates: #", "comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams, committees = assign(teams,", "Inputs: teams, a list of Team objects from format_for_main committees, a dict of", "- i * max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs)", "s2: print(\"It worked! :)\") else: print(\"There's a bug. Bad computer. :(\") if __name__", "committee_info_filename): ''' Takes the filepaths and returns the dataframes ''' schools = pd.read_csv(school_info_filename)", "= row['Number of Delegates'] - i * max_at_conf if num_dels > max_at_conf: team", "{} to {}\".format(team.name, c.name)) break else: continue else: continue return teams, committees def", "picks and returns a list of picks that they get. 
Thanks stack overflow!", "list of possible rounds the number of picks and returns a list of", "a draft. Each team got picks assigned at initialization (first round, fourth round,", "preferences): ''' num_delegats is an int of the total number of delegates preferences", "to {}\".format(team.name, c.name)) break else: continue else: continue return teams, committees def output(teams,", "{}\".format(team.name, c.name)) break else: continue else: continue return teams, committees def output(teams, committees):", "Takes a list of possible rounds the number of picks and returns a", "Runs the whole darn thing. ''' schools, comms = read_info(school_filename, committee_filename) teams, committees", "name of committee num_spots: maximum number of delegates that can be assigned to", "\\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace", "sequence, num): ''' Intersperses picks for small delegations. Takes a list of possible", "known problems: - do schools at different times (ew) - Bias towards double", "'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for index,", "'Delegation Size'] schools.columns = ['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for", "''' teams = [] committees = {} max_at_conf = 0 comms.columns = ['Committee',", "i #no break so I can grab the last value c_to_drop = val", "break else: continue else: continue return teams, committees def output(teams, committees): ''' Outputs", "assigned to that committee delegation size: 1 for single, 2 for double, and", "computer. 
:(\") if __name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong.", "from format_for_main committees, a dict of committees (name : Committee object) from format_for_main", "in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested # than", "in range(len(comms))] for index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'],", "objects ''' teams = [] committees = {} max_at_conf = 0 comms.columns =", "to make the draft fair assigned committees will be the committees assigned to", "either all delegates are assigned or all committees are filled. Inputs: teams, a", "Bias towards double delegate committees ''' class Team: def __init__(self, name, num_delegates, preferences):", "Takes the teams' constraints/preferences and committees and simulates a draft. Each team got", "for i in range(len(comms))] for index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number", "ranked preferences as a list, in order (all committees must be present) picks", "Committee object) from format_for_main Outputs: teams, a list of Team objects with assignments", "Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size']", "comms.columns = ['Committee', 'Number of Spots', 'Delegation Size'] schools.columns = ['School', 'Number of", "Outputs: teams, a list of Team objects with assignments committees, a dict of", "Spots', 'Delegation Size'] schools.columns = ['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i))", "prefs = [j for j in row[2:]] for i in range(ceil(row['Number of Delegates']", "else: team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams, committees def", "team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, 
committees[comm].num_spots,", "committees = assign(teams, committees) output(teams, committees) s = 0 for i in teams:", "committees, a dict of committees (formatted the same) with assignments ''' for r", "assigned or all committees are filled. Inputs: teams, a list of Team objects", "__name__ == \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong. Please make sure", "as pd from math import ceil from sys import argv ''' Current known", "Draft-based assignment. Takes the teams' constraints/preferences and committees and simulates a draft. Each", "format_for_main(schools, comms): ''' Creates all the objects and fills in the information from", "= float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks", "row['Number of Delegates'], prefs) teams.append(team) return teams, committees def assign(teams, committees): ''' My", "assigned at initialization (first round, fourth round, etc.), and it iterates through each", "teams, a list of Team objects from format_for_main committees, a dict of committees", "''' Creates all the objects and fills in the information from the dataframes", "we assign to make the draft fair assigned committees will be the committees", "in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! :)\") else:", "prefs) teams.append(team) else: team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams,", "committees[val].delegation_size == 1: index_to_drop = i #no break so I can grab the", "schools.columns = ['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in", "all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm", "== \"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong. 
Please make sure your", "def assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and", "present) picks is the picks we assign to make the draft fair assigned", "= [j for j in row[2:]] for i in range(ceil(row['Number of Delegates'] /", "max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team", "of delegates that can be assigned to that committee delegation size: 1 for", "index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']]", "committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'},", "max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number of", "c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break else:", "== s2: print(\"It worked! :)\") else: print(\"There's a bug. Bad computer. :(\") if", "\"__main__\": try: go(argv[1], argv[2]) except: print(\"Something went wrong. Please make sure your usage", "length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length / num))]) return", "committee num_spots: maximum number of delegates that can be assigned to that committee", "i in range(len(comms))] for index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of", "picks for small delegations. 
Takes a list of possible rounds the number of", "the information from the dataframes inputs: schools, comms: pandas dataframes from read_info outputs:", "for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) <", "preferences is the ranked preferences as a list, in order (all committees must", "to Committee objects ''' teams = [] committees = {} max_at_conf = 0", "team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned", "go(school_filename, committee_filename): ''' Runs the whole darn thing. ''' schools, comms = read_info(school_filename,", "# than there are committees. num_dels = row['Number of Delegates'] - i *", "on the committee ''' self.name = name self.num_spots = num_spots self.delegation_size = delegation_size", "self.num_delegates = num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = []", "def go(school_filename, committee_filename): ''' Runs the whole darn thing. ''' schools, comms =", "''' Intersperses picks for small delegations. 
Takes a list of possible rounds the", "[] all_comm_assignments = [] for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for", "a list of Team objects committees, a dict mapping committee names to Committee", "format_for_main committees, a dict of committees (name : Committee object) from format_for_main Outputs:", "''' Takes the filepaths and returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms", "num_spots self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes the", "self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self, sequence,", "team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team) return teams, committees def assign(teams,", "of Team objects with assignments committees, a dict of committees (formatted the same)", "the last value c_to_drop = val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name,", "if s == s2: print(\"It worked! :)\") else: print(\"There's a bug. Bad computer.", "towards double delegate committees ''' class Team: def __init__(self, name, num_delegates, preferences): '''", "pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): ''' Creates all the objects and", "are assigned or all committees are filled. 
Inputs: teams, a list of Team", "[] self.num_dels_assigned = 0 def _get_picks(self, sequence, num): ''' Intersperses picks for small", "delegation_size): ''' name: name of committee num_spots: maximum number of delegates that can", "committees) s = 0 for i in teams: s += i.num_delegates s2 =", "i in teams: s += i.num_delegates s2 = 0 for key in committees:", "[j for j in row[2:]] for i in range(ceil(row['Number of Delegates'] / max_at_conf)):", "the ranked preferences as a list, in order (all committees must be present)", "can be assigned to that committee delegation size: 1 for single, 2 for", "Takes the filepaths and returns the dataframes ''' schools = pd.read_csv(school_info_filename) comms =", "teams, committees def assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes the teams'", "team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c =", "2 for double, and so on assigned schools: the schools who have a", "in range(len(committees)): print(\"round {}\".format(r)) for team in teams: if r in team.picks and", "that can be assigned to that committee delegation size: 1 for single, 2", "committees[comm].num_spots, committees[comm].delegation_size] \\ + committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of", "draft until either all delegates are assigned or all committees are filled. Inputs:", "the master documents. 
Inputs from assign ''' all_school_assignments = [] all_comm_assignments = []", "problems: - do schools at different times (ew) - Bias towards double delegate", "if num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team =", "import ceil from sys import argv ''' Current known problems: - do schools", "comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): ''' Creates all the", "i in range(num): picks.append(sequence[int(ceil(i * length / num))]) return picks class Committee: def", "math import ceil from sys import argv ''' Current known problems: - do", "= val committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\\ .assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break else: continue", "+ \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))] for index, row in comms.iterrows():", "information from the dataframes inputs: schools, comms: pandas dataframes from read_info outputs: teams,", "schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole darn thing. ''' schools,", "row[2:]] for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates", "p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned <", "assignments committees, a dict of committees (formatted the same) with assignments ''' for", "will be the committees assigned to be outputted ''' self.name = name self.num_delegates", "num_delegats is an int of the total number of delegates preferences is the", "they get. Thanks stack overflow! 
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length = float(len(sequence))", "committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for index, row in schools.iterrows(): prefs", "is the picks we assign to make the draft fair assigned committees will", "small delegations. Takes a list of possible rounds the number of picks and", "= {} max_at_conf = 0 comms.columns = ['Committee', 'Number of Spots', 'Delegation Size']", "import pandas as pd from math import ceil from sys import argv '''", "row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole darn thing.", "ceil from sys import argv ''' Current known problems: - do schools at", "than there are committees. num_dels = row['Number of Delegates'] - i * max_at_conf", "format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams, committees) s = 0 for", "from read_info outputs: teams, a list of Team objects committees, a dict mapping", "argv ''' Current known problems: - do schools at different times (ew) -", "teams, committees = format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams, committees) s", "team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees):", "names to Committee objects ''' teams = [] committees = {} max_at_conf =", ":)\") else: print(\"There's a bug. Bad computer. :(\") if __name__ == \"__main__\": try:", "a dict of committees (formatted the same) with assignments ''' for r in", "teams, committees def output(teams, committees): ''' Outputs the master documents. 
Inputs from assign", "row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for index, row in", "comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf +=", "pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments)", "index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs the whole darn", "objects committees, a dict mapping committee names to Committee objects ''' teams =", "format_for_main Outputs: teams, a list of Team objects with assignments committees, a dict", "(name : Committee object) from format_for_main Outputs: teams, a list of Team objects", "# print(team.name, team.preferences) for pref in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p]", "spot on the committee ''' self.name = name self.num_spots = num_spots self.delegation_size =", "= Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number of Delegates'], prefs)", "pd from math import ceil from sys import argv ''' Current known problems:", "objects and fills in the information from the dataframes inputs: schools, comms: pandas", "Team objects committees, a dict mapping committee names to Committee objects ''' teams", "committees): ''' Outputs the master documents. 
Inputs from assign ''' all_school_assignments = []", "rounds the number of picks and returns a list of picks that they", "(all committees must be present) picks is the picks we assign to make", "c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val", "from assign ''' all_school_assignments = [] all_comm_assignments = [] for team in teams:", "that they get. Thanks stack overflow! http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n ''' picks = [] length =", "c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned", "of Team objects from format_for_main committees, a dict of committees (name : Committee", "schools, comms: pandas dataframes from read_info outputs: teams, a list of Team objects", "- 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates:", "Size'] schools.columns = ['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i", "= team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates", "go(argv[1], argv[2]) except: print(\"Something went wrong. 
Please make sure your usage is correct", "val in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i #no break so", "delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns", "''' self.name = name self.num_delegates = num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))),", "= ['School', 'Number of Delegates'] + \\ [\"Preference {}\".format(str(i)) for i in range(len(comms))]", "the schools who have a spot on the committee ''' self.name = name", "assign(teams, committees) output(teams, committees) s = 0 for i in teams: s +=", "is an int of the total number of delegates preferences is the ranked", "filled. Inputs: teams, a list of Team objects from format_for_main committees, a dict", "print(\"There's a bug. Bad computer. :(\") if __name__ == \"__main__\": try: go(argv[1], argv[2])", "in team.preferences: p = team.preferences.pop(team.preferences.index(pref)) c = committees[p] if len(c.assigned_schools) < c.num_spots and", "if committees[val].delegation_size == 1: index_to_drop = i #no break so I can grab", "= 0 for i in teams: s += i.num_delegates s2 = 0 for", "= 0 def _get_picks(self, sequence, num): ''' Intersperses picks for small delegations. Takes", "committee_filename) teams, committees = format_for_main(schools, comms) teams, committees = assign(teams, committees) output(teams, committees)", "row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] =", "master documents. 
Inputs from assign ''' all_school_assignments = [] all_comm_assignments = [] for", "as a list, in order (all committees must be present) picks is the", "range(len(committees)): print(\"round {}\".format(r)) for team in teams: if r in team.picks and len(team.assigned_committees)", "committee delegation size: 1 for single, 2 for double, and so on assigned", "assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees", "different times (ew) - Bias towards double delegate committees ''' class Team: def", "outputs: teams, a list of Team objects committees, a dict mapping committee names", "of Delegates'] - i * max_at_conf if num_dels > max_at_conf: team = Team(row['School']+str(i+2),", "picks = [] length = float(len(sequence)) for i in range(num): picks.append(sequence[int(ceil(i * length", "pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): '''", "key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! :)\")", "= pd.read_csv(school_info_filename) comms = pd.read_csv(committee_info_filename) return schools, comms def format_for_main(schools, comms): ''' Creates", "''' Runs the whole darn thing. ''' schools, comms = read_info(school_filename, committee_filename) teams,", "usage is correct and files are formatted correctly.\") print(\"Usage: python3 get_positions.py [school_info_filepath] [committee", "''' Current known problems: - do schools at different times (ew) - Bias", "return teams, committees def assign(teams, committees): ''' My algorithm! Draft-based assignment. Takes the", "more delegates requested # than there are committees. 
num_dels = row['Number of Delegates']", "for team in teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm,", "a list of Team objects from format_for_main committees, a dict of committees (name", "initialization (first round, fourth round, etc.), and it iterates through each round of", "teams: all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees) for comm in committees: all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \\", "''' schools, comms = read_info(school_filename, committee_filename) teams, committees = format_for_main(schools, comms) teams, committees", "delegate committees ''' class Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats is", "name self.num_delegates = num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees =", "went wrong. Please make sure your usage is correct and files are formatted", "Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats is an int of the", "the committees assigned to be outputted ''' self.name = name self.num_delegates = num_delegates", "self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename, committee_info_filename): '''", "of Spots'], row['Delegation Size']) committees[row['Committee']] = comm max_at_conf += row['Delegation Size'] for index,", "Delegates'], prefs) teams.append(team) return teams, committees def assign(teams, committees): ''' My algorithm! 
Draft-based", "in enumerate(team.assigned_committees): if committees[val].delegation_size == 1: index_to_drop = i #no break so I", "sys import argv ''' Current known problems: - do schools at different times", "comms): ''' Creates all the objects and fills in the information from the", "in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size']) committees[row['Committee']] = comm", "schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename, committee_filename): ''' Runs", "double, and so on assigned schools: the schools who have a spot on", "sure your usage is correct and files are formatted correctly.\") print(\"Usage: python3 get_positions.py", "def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns the dataframes ''' schools", "len(committees[key].assigned_schools)*committees[key].delegation_size if s == s2: print(\"It worked! :)\") else: print(\"There's a bug. 
Bad", "Delegates'] / max_at_conf)): # handling more delegates requested # than there are committees.", "1: index_to_drop = i #no break so I can grab the last value", "c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val in", "double delegate committees ''' class Team: def __init__(self, name, num_delegates, preferences): ''' num_delegats", "fourth round, etc.), and it iterates through each round of the draft until", "team.picks and len(team.assigned_committees) < team.num_delegates: # print(team.name, team.preferences) for pref in team.preferences: p", "num_spots, delegation_size): ''' name: name of committee num_spots: maximum number of delegates that", "= num_delegates self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned", "comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in schools_df.iterrows(): row.to_csv(\"school_assignments/{}'s_assignments.csv\".format(row['School'])) def go(school_filename,", "My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and simulates a", "schools: the schools who have a spot on the committee ''' self.name =", "Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'], row['Number of Delegates'], prefs) teams.append(team)", "Committee: def __init__(self, name, num_spots, delegation_size): ''' name: name of committee num_spots: maximum", "of Delegates'], prefs) teams.append(team) return teams, committees def assign(teams, committees): ''' My algorithm!", "Please make sure your usage is correct and files are formatted correctly.\") print(\"Usage:", "and returns a list of picks that they get. Thanks stack overflow! 
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n", "possible rounds the number of picks and returns a list of picks that", "team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for i, val in enumerate(team.assigned_committees): if", "dataframes inputs: schools, comms: pandas dataframes from read_info outputs: teams, a list of", "Delegates'}, inplace = True) comm_df = pd.DataFrame(all_comm_assignments) schools_df.to_csv('all_school_assignments.csv') comm_df.to_csv(\"all_committees_assignments.csv\") for index, row in", "be present) picks is the picks we assign to make the draft fair", "of committees (formatted the same) with assignments ''' for r in range(len(committees)): print(\"round", "r in range(len(committees)): print(\"round {}\".format(r)) for team in teams: if r in team.picks", "schools who have a spot on the committee ''' self.name = name self.num_spots", "num_dels > max_at_conf: team = Team(row['School']+str(i+2), max_at_conf, prefs) teams.append(team) else: team = Team(row['School'],", "the total number of delegates preferences is the ranked preferences as a list,", ".assigned_schools.index(team.name)) team.assigned_committees.pop(index_to_drop) print(\"assigned {} to {}\".format(team.name, c.name)) break else: continue else: continue return", "is the ranked preferences as a list, in order (all committees must be", "a list, in order (all committees must be present) picks is the picks", "''' name: name of committee num_spots: maximum number of delegates that can be", "''' self.name = name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools = []", "delegates requested # than there are committees. 
num_dels = row['Number of Delegates'] -", "name, num_spots, delegation_size): ''' name: name of committee num_spots: maximum number of delegates", "name, num_delegates, preferences): ''' num_delegats is an int of the total number of", "= [] def read_info(school_info_filename, committee_info_filename): ''' Takes the filepaths and returns the dataframes", "(first round, fourth round, etc.), and it iterates through each round of the", "dict of committees (name : Committee object) from format_for_main Outputs: teams, a list", "committees are filled. Inputs: teams, a list of Team objects from format_for_main committees,", "picks class Committee: def __init__(self, name, num_spots, delegation_size): ''' name: name of committee", "in row[2:]] for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more", "until either all delegates are assigned or all committees are filled. Inputs: teams,", "dataframes from read_info outputs: teams, a list of Team objects committees, a dict", "committees[comm].assigned_schools) schools_df = pd.DataFrame(all_school_assignments) schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True)", "num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self, sequence, num): ''' Intersperses", "= name self.num_spots = num_spots self.delegation_size = delegation_size self.assigned_schools = [] def read_info(school_info_filename,", "for index, row in comms.iterrows(): comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size'])", "+ c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size if team.num_dels_assigned > team.num_delegates: for i,", "= committees[p] if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \\ - 1", "self.preferences = preferences self.picks = self._get_picks(list(range(len(preferences))), num_delegates) 
self.assigned_committees = [] self.num_dels_assigned = 0", "self._get_picks(list(range(len(preferences))), num_delegates) self.assigned_committees = [] self.num_dels_assigned = 0 def _get_picks(self, sequence, num): '''", "team.num_dels_assigned < team.num_delegates \\ - 1 + c.delegation_size: c.assigned_schools.append(team.name) team.assigned_committees.append(c.name) team.num_dels_assigned += c.delegation_size", "assigned committees will be the committees assigned to be outputted ''' self.name =", "/ max_at_conf)): # handling more delegates requested # than there are committees. num_dels" ]
[ "not begin with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not", "of equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls,", "parse_float: value = cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value)", "if not suppress_exceptions: raise ValueError(f\"Key or section name must only contain letters, numbers", "parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index =", "\"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw):", "try: return float(value) except ValueError: pass return value @staticmethod def _generate_file_line(key, value): return", "not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod", "return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\")", "=\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index", "ValueError: pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") >", "must only contain letters, numbers and underscores\") else: return False return True @staticmethod", "is not None: if not suppress_exceptions: raise ValueError(f\"Key or section name must only", "\"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned", "not suppress_exceptions: raise ValueError(f\"Key or section name must not be blank.\") else: return", "line = cls._clean_line(line) equal_index = line.index(\"=\") return 
line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True,", "False # check if line to left of equal sign is a valid", "= line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return", "suppress_exceptions: raise ValueError(f\"Key or section name must be a string, not None\") else:", "line_lower == \"true\": return True if line_lower == \"false\": return False return value", "isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str):", "return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0: try:", "section name must not begin with '_'\") else: return False if name[0].isnumeric(): if", "def _is_line_a_heading(line): if len(line) <= 2: return False return line[0] == \"[\" and", "|,\", string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if", "return False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section name must", "suppress_exceptions: raise ValueError(f\"Key or section name must only contain letters, numbers and underscores\")", "= line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def", "suppress_exceptions: raise ValueError(f\"Key or section name must not begin with '_'\") else: return", "def _get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered", "value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower =", "or section name must not begin with '_'\") else: return False if name[0].isnumeric():", "= [] if string is not None and len(string) > 0: items =", "False if len(name) == 0: if not 
suppress_exceptions: raise ValueError(f\"Key or section name", "if line_lower == \"false\": return False return value @staticmethod def _attempt_parse_int(value): if isinstance(value,", "must be a string, not None\") else: return False if not isinstance(name, str):", "blank.\") else: return False if name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key", "== \"false\": return False return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if", "= re.split(r\", |,\", string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is", "float(value) except ValueError: pass return value @staticmethod def _generate_file_line(key, value): return f\"{key} =", "must be a string. {name} is type {type(name)}\") else: return False if len(name)", "return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0: try:", "def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0: try: return int(value) except", "try: equal_index = line.index(\"=\") except ValueError: return False # check if line to", "string. 
{name} is type {type(name)}\") else: return False if len(name) == 0: if", "False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions: raise ValueError(f\"Key or", "line): line = cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return False #", "return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line", "value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0: try: return", "_is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return False", "@staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0: try: return float(value)", "raise ValueError(f\"Key or section name must not begin with '_'\") else: return False", "if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with a", "cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True):", "return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return", "if isinstance(value, str): if value.count(\".\") > 0: try: return float(value) except ValueError: pass", "def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index =", "letters, numbers and underscores\") else: return False return True @staticmethod def _is_line_a_heading(line): if", "parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int and not", "isinstance(value, str): line_lower = value.lower() if line_lower == \"true\": return True if 
line_lower", "raise ValueError(f\"Key or section name must be a string, not None\") else: return", "value.count(\".\") == 0: try: return int(value) except ValueError: pass return value @staticmethod def", "_get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line)", "{k: v for k, v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod", "cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod", "name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key or section name must not", "str): if value.count(\".\") == 0: try: return int(value) except ValueError: pass return value", "_get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \",", "cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod def", "name must not be blank.\") else: return False if name[0] == \"_\": if", "def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return", "else: return False if len(name) == 0: if not suppress_exceptions: raise ValueError(f\"Key or", "is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not", "attrs = self.__dict__ attrs_filtered = {k: v for k, v in attrs.items() if", "or section name must only contain letters, numbers and underscores\") else: return False", "type {type(name)}\") else: return False if len(name) == 0: if not suppress_exceptions: raise", "pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0:", "attrs_filtered = {k: v for 
k, v in attrs.items() if not k.startswith(\"_\")} return", "raise ValueError(f\"Key or section name must be a string. {name} is type {type(name)}\")", "re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions: raise ValueError(f\"Key or section name", "name must not begin with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name)", "value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower() if line_lower ==", "return True if line_lower == \"false\": return False return value @staticmethod def _attempt_parse_int(value):", "and len(string) > 0: items = re.split(r\", |,\", string) return items @staticmethod def", "section name must not begin with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\",", "value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0: try: return", "return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not suppress_exceptions:", "== \"_\": if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin", "equal_index = line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if", "if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") value =", "if parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod def", "line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None", "not be blank.\") else: return False if name[0] == \"_\": if not suppress_exceptions:", "def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v for k, v in", "0: try: return int(value) except ValueError: pass return value @staticmethod def 
_attempt_parse_float(value): if", "suppress_exceptions=False): if name is None: if not suppress_exceptions: raise ValueError(f\"Key or section name", "not isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value,", "def convert_string_to_list(string): items = [] if string is not None and len(string) >", "attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k:", "def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not suppress_exceptions: raise ValueError(f\"Key or", "value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int and not isinstance(value,", "False return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0:", "False return True @staticmethod def _is_line_a_heading(line): if len(line) <= 2: return False return", "else: return False if not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or", "_attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower() if line_lower == \"true\": return True", "_clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\",", "{type(name)}\") else: return False if len(name) == 0: if not suppress_exceptions: raise ValueError(f\"Key", "not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with a number\")", "line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned", "ValueError(f\"Key or section name must be a string. 
{name} is type {type(name)}\") else:", "2: return False return line[0] == \"[\" and line[-1] == \"]\" @staticmethod def", "# check if line to left of equal sign is a valid key", "if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int and", "ValueError(f\"Key or section name must be a string, not None\") else: return False", "None line = cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line,", "equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line):", "a string, not None\") else: return False if not isinstance(name, str): if not", "_get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\")", "@staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned =", "= line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line)", "return None line = cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index + 1:]", "ValueError(f\"Key or section name must not be blank.\") else: return False if name[0]", "line_lower == \"false\": return False return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str):", "section name must not be blank.\") else: return False if name[0] == \"_\":", "isinstance(value, str): if value.count(\".\") == 0: try: return int(value) except ValueError: pass return", "@classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line", "return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower = 
value.lower() if line_lower", "except ValueError: pass return value @staticmethod def _generate_file_line(key, value): return f\"{key} = {value}\\n\"", "suppress_exceptions: raise ValueError(f\"Key or section name must not be blank.\") else: return False", "if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin", "return False if len(name) == 0: if not suppress_exceptions: raise ValueError(f\"Key or section", "try: return int(value) except ValueError: pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value,", "return False if not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or section", "except ValueError: pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\")", "BaseClass: def _get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__", "@staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not suppress_exceptions: raise ValueError(f\"Key", "= cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower()", "def _attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower() if line_lower == \"true\": return", "contain letters, numbers and underscores\") else: return False return True @staticmethod def _is_line_a_heading(line):", "or section name must be a string. 
{name} is type {type(name)}\") else: return", "import re class BaseClass: def _get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self):", "\"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line", "self.__dict__ attrs_filtered = {k: v for k, v in attrs.items() if not k.startswith(\"_\")}", "check if line to left of equal sign is a valid key return", "\"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index =", "ValueError(f\"Key or section name must not begin with '_'\") else: return False if", "_attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0: try: return float(value) except ValueError:", "self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v for", "suppress_exceptions: raise ValueError(f\"Key or section name must not begin with a number\") else:", "with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if", "if len(name) == 0: if not suppress_exceptions: raise ValueError(f\"Key or section name must", "not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if string is", "a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line):", "= line.index(\"=\") value = line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value) if", "\"true\": return True if line_lower == \"false\": return False return value @staticmethod def", "items = [] if string is not None and len(string) > 0: items", "section name must only contain letters, numbers and underscores\") else: return False return", "line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def 
_clean_line(line_raw): line_cleaned =", "return None line = cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls,", "@staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0: try: return int(value)", "numbers and underscores\") else: return False return True @staticmethod def _is_line_a_heading(line): if len(line)", "line = cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return False # check", "return False return line[0] == \"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line):", "line.index(\"=\") except ValueError: return False # check if line to left of equal", "line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line):", "def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\"", "must not begin with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is", "a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not", "<= 2: return False return line[0] == \"[\" and line[-1] == \"]\" @staticmethod", "= cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True,", "or section name must not begin with a number\") else: return False if", "and underscores\") else: return False return True @staticmethod def _is_line_a_heading(line): if len(line) <=", "= line.index(\"=\") except ValueError: return False # check if line to left of", "== 0: try: return int(value) except ValueError: pass return value @staticmethod def _attempt_parse_float(value):", "if not suppress_exceptions: raise ValueError(f\"Key or section name must not be blank.\") else:", 
"value = cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value) return", "not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or section name must be", "return False return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") ==", "not suppress_exceptions: raise ValueError(f\"Key or section name must only contain letters, numbers and", "@staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned =", "not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with '_'\") else:", "None\") else: return False if not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key", "not None: if not suppress_exceptions: raise ValueError(f\"Key or section name must only contain", "line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index", "sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if", "= cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return False # check if", "== \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip()", "int(value) except ValueError: pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str): if", "line to left of equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True)", "be blank.\") else: return False if name[0] == \"_\": if not suppress_exceptions: raise", "return attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if string is not None", "cls._clean_line(line) equal_index = 
line.index(\"=\") value = line[equal_index + 1:] if parse_bool: value =", "is not None and len(string) > 0: items = re.split(r\", |,\", string) return", "isinstance(value, str): if value.count(\".\") > 0: try: return float(value) except ValueError: pass return", "if string is not None and len(string) > 0: items = re.split(r\", |,\",", "line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod", "cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index +", "begin with '_'\") else: return False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key", "value.count(\".\") > 0: try: return float(value) except ValueError: pass return value @staticmethod def", "if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions: raise ValueError(f\"Key or section", "not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index", "valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return", "> 0: items = re.split(r\", |,\", string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False):", "len(name) == 0: if not suppress_exceptions: raise ValueError(f\"Key or section name must not", "and not isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if", "{name} is type {type(name)}\") else: return False if len(name) == 0: if not", "v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items =", "def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line =", 
"ValueError: return False # check if line to left of equal sign is", "= line.index(\"=\") return line[:equal_index] @classmethod def _get_value_from_line(cls, line, parse_bool=True, parse_float=True, parse_int=True): if not", "value.lower() if line_lower == \"true\": return True if line_lower == \"false\": return False", "k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if string is not", "False if name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key or section name", "= self.__dict__ attrs_filtered = {k: v for k, v in attrs.items() if not", "be a string, not None\") else: return False if not isinstance(name, str): if", "re.split(r\", |,\", string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None:", "is type {type(name)}\") else: return False if len(name) == 0: if not suppress_exceptions:", "is None: if not suppress_exceptions: raise ValueError(f\"Key or section name must be a", "ValueError(f\"Key or section name must only contain letters, numbers and underscores\") else: return", "if not suppress_exceptions: raise ValueError(f\"Key or section name must be a string. 
{name}", "not begin with '_'\") else: return False if name[0].isnumeric(): if not suppress_exceptions: raise", "parse_float=True, parse_int=True): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\")", "== \"true\": return True if line_lower == \"false\": return False return value @staticmethod", "0: if not suppress_exceptions: raise ValueError(f\"Key or section name must not be blank.\")", "_is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not suppress_exceptions: raise ValueError(f\"Key or section", "return False # check if line to left of equal sign is a", "line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line =", "return False return True @staticmethod def _is_line_a_heading(line): if len(line) <= 2: return False", "if line to left of equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index],", "= self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v", "name must only contain letters, numbers and underscores\") else: return False return True", "if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") return line[:equal_index]", "cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower() if", "= cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index + 1:] if parse_bool: value", "name is None: if not suppress_exceptions: raise ValueError(f\"Key or section name must be", "line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try:", "name) is not None: if not suppress_exceptions: raise ValueError(f\"Key or section name must", "raise ValueError(f\"Key or section name must not be blank.\") 
else: return False if", "\"_\": if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with", "line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls,", "line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\")", "if not suppress_exceptions: raise ValueError(f\"Key or section name must be a string, not", "None: if not suppress_exceptions: raise ValueError(f\"Key or section name must be a string,", "number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions:", "isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or section name must be a", "name must be a string. {name} is type {type(name)}\") else: return False if", "cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError: return False # check if line", "attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v for k, v", "convert_string_to_list(string): items = [] if string is not None and len(string) > 0:", "'_'\") else: return False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section", "if isinstance(value, str): line_lower = value.lower() if line_lower == \"true\": return True if", "if parse_float: value = cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value =", "attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if", "raise ValueError(f\"Key or section name must not begin with a number\") else: return", "or section name must be a string, not None\") else: return False if", "be a string. 
{name} is type {type(name)}\") else: return False if len(name) ==", "name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with", "for k, v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string):", "+ 1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if", "line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value)", "len(line) <= 2: return False return line[0] == \"[\" and line[-1] == \"]\"", "str): line_lower = value.lower() if line_lower == \"true\": return True if line_lower ==", "with '_'\") else: return False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or", "suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line)", "suppress_exceptions: raise ValueError(f\"Key or section name must be a string. 
{name} is type", "return False if name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key or section", "\"false\": return False return value @staticmethod def _attempt_parse_int(value): if isinstance(value, str): if value.count(\".\")", "= cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value) return value", "cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line =", "str): if value.count(\".\") > 0: try: return float(value) except ValueError: pass return value", "must not be blank.\") else: return False if name[0] == \"_\": if not", "@staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower = value.lower() if line_lower == \"true\":", "= value.lower() if line_lower == \"true\": return True if line_lower == \"false\": return", "\", \"=\") line_cleaned = line_cleaned.replace(\" =\", \"=\") return line_cleaned @classmethod def _is_line_an_entry(cls, line):", "None: if not suppress_exceptions: raise ValueError(f\"Key or section name must only contain letters,", "_is_line_a_heading(line): if len(line) <= 2: return False return line[0] == \"[\" and line[-1]", "len(string) > 0: items = re.split(r\", |,\", string) return items @staticmethod def _is_key_or_section_name_valid(name,", "to left of equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod", "equal_index = line.index(\"=\") except ValueError: return False # check if line to left", "1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int", "ValueError(f\"Key or section name must not begin with a number\") else: return False", "return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v for k,", "@classmethod def 
_get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index", "else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions: raise", "True if line_lower == \"false\": return False return value @staticmethod def _attempt_parse_int(value): if", "line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index = line.index(\"=\") except", "left of equal sign is a valid key return cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def", "else: return False return True @staticmethod def _is_line_a_heading(line): if len(line) <= 2: return", "_get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs = self.__dict__ attrs_filtered =", "section name must be a string. {name} is type {type(name)}\") else: return False", "not suppress_exceptions: raise ValueError(f\"Key or section name must be a string. 
{name} is", "str): if not suppress_exceptions: raise ValueError(f\"Key or section name must be a string.", "get_attributes(self): attrs = self.__dict__ attrs_filtered = {k: v for k, v in attrs.items()", "True @staticmethod def _is_line_a_heading(line): if len(line) <= 2: return False return line[0] ==", "def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"=", "re class BaseClass: def _get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs", "float): value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value): if isinstance(value, str): line_lower", "line.index(\"=\") value = line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float:", "None and len(string) > 0: items = re.split(r\", |,\", string) return items @staticmethod", "@staticmethod def convert_string_to_list(string): items = [] if string is not None and len(string)", "return line[0] == \"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1]", "name must not begin with '_'\") else: return False if name[0].isnumeric(): if not", "line_lower = value.lower() if line_lower == \"true\": return True if line_lower == \"false\":", "and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned", "else: return False if name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key or", "else: return False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section name", "parse_int and not isinstance(value, float): value = cls._attempt_parse_int(value) return value @staticmethod def _attempt_parse_bool(value):", "return line_cleaned @classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index = line.index(\"=\")", "if 
not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or section name must", "items = re.split(r\", |,\", string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name", "False if name[0].isnumeric(): if not suppress_exceptions: raise ValueError(f\"Key or section name must not", "not None and len(string) > 0: items = re.split(r\", |,\", string) return items", "= {k: v for k, v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered", "= line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value =", "if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if string", "a string. {name} is type {type(name)}\") else: return False if len(name) == 0:", "if len(line) <= 2: return False return line[0] == \"[\" and line[-1] ==", "== \"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod def", "line = cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index + 1:] if parse_bool:", "cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int and not isinstance(value, float): value", "class BaseClass: def _get_keys(self): attrs = self.get_attributes() return attrs.keys() def get_attributes(self): attrs =", "line[1:-1] @staticmethod def _clean_line(line_raw): line_cleaned = line_raw.rstrip() line_cleaned = line_cleaned.replace(\"= \", \"=\") line_cleaned", "not None\") else: return False if not isinstance(name, str): if not suppress_exceptions: raise", "return True @staticmethod def _is_line_a_heading(line): if len(line) <= 2: return False return line[0]", "@classmethod def _is_line_an_entry(cls, line): line = cls._clean_line(line) try: equal_index = line.index(\"=\") except ValueError:", "0: items = re.split(r\", |,\", string) return items @staticmethod def 
_is_key_or_section_name_valid(name, suppress_exceptions=False): if", "value = line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value) if parse_float: value", "if name[0] == \"_\": if not suppress_exceptions: raise ValueError(f\"Key or section name must", "@staticmethod def _is_line_a_heading(line): if len(line) <= 2: return False return line[0] == \"[\"", "v for k, v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def", "== 0: if not suppress_exceptions: raise ValueError(f\"Key or section name must not be", "in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items = []", "if not suppress_exceptions: raise ValueError(f\"Key or section name must not begin with '_'\")", "None line = cls._clean_line(line) equal_index = line.index(\"=\") value = line[equal_index + 1:] if", "parse_int=True): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") value", "except ValueError: return False # check if line to left of equal sign", "False return line[0] == \"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return", "0: try: return float(value) except ValueError: pass return value @staticmethod def _generate_file_line(key, value):", "string is not None and len(string) > 0: items = re.split(r\", |,\", string)", "attrs_filtered @staticmethod def convert_string_to_list(string): items = [] if string is not None and", "items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not suppress_exceptions: raise", "only contain letters, numbers and underscores\") else: return False return True @staticmethod def", "section name must be a string, not None\") else: return False if not", "if name is None: if not suppress_exceptions: raise ValueError(f\"Key or section name must", "raise ValueError(f\"Key or section name must only contain letters, 
numbers and underscores\") else:", "= cls._attempt_parse_bool(value) if parse_float: value = cls._attempt_parse_float(value) if parse_int and not isinstance(value, float):", "line[0] == \"[\" and line[-1] == \"]\" @staticmethod def _get_heading_from_line(line): return line[1:-1] @staticmethod", "k, v in attrs.items() if not k.startswith(\"_\")} return attrs_filtered @staticmethod def convert_string_to_list(string): items", "return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None: if not suppress_exceptions: raise ValueError(f\"Key", "[] if string is not None and len(string) > 0: items = re.split(r\",", "name must be a string, not None\") else: return False if not isinstance(name,", "string) return items @staticmethod def _is_key_or_section_name_valid(name, suppress_exceptions=False): if name is None: if not", "if value.count(\".\") > 0: try: return float(value) except ValueError: pass return value @staticmethod", "underscores\") else: return False return True @staticmethod def _is_line_a_heading(line): if len(line) <= 2:", "return int(value) except ValueError: pass return value @staticmethod def _attempt_parse_float(value): if isinstance(value, str):", "line): if not cls._is_line_an_entry(line): return None line = cls._clean_line(line) equal_index = line.index(\"=\") return", "if line_lower == \"true\": return True if line_lower == \"false\": return False return", "must not begin with '_'\") else: return False if name[0].isnumeric(): if not suppress_exceptions:", "> 0: try: return float(value) except ValueError: pass return value @staticmethod def _generate_file_line(key,", "if value.count(\".\") == 0: try: return int(value) except ValueError: pass return value @staticmethod", "equal_index = line.index(\"=\") value = line[equal_index + 1:] if parse_bool: value = cls._attempt_parse_bool(value)", "if isinstance(value, str): if value.count(\".\") == 0: try: return int(value) except ValueError: pass", "key return 
cls._is_key_or_section_name_valid(line[:equal_index], suppress_exceptions=True) @classmethod def _get_key_from_line(cls, line): if not cls._is_line_an_entry(line): return None", "_attempt_parse_int(value): if isinstance(value, str): if value.count(\".\") == 0: try: return int(value) except ValueError:", "begin with a number\") else: return False if re.search(r\"[^a-zA-Z_0-9]\", name) is not None:", "False if not isinstance(name, str): if not suppress_exceptions: raise ValueError(f\"Key or section name", "or section name must not be blank.\") else: return False if name[0] ==", "def _attempt_parse_float(value): if isinstance(value, str): if value.count(\".\") > 0: try: return float(value) except", "return float(value) except ValueError: pass return value @staticmethod def _generate_file_line(key, value): return f\"{key}", "string, not None\") else: return False if not isinstance(name, str): if not suppress_exceptions:", "not suppress_exceptions: raise ValueError(f\"Key or section name must be a string, not None\")" ]
[ "with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback()", "new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit()", "Create a new user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int) ->", "db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection:", "finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return", "List: \"\"\"Retorna uma lista de todos os clients\"\"\" with DBConnectionHendler() as db_connection: try:", "with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod", "def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id)", "import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando", "try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\"", "db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod @db_connector def", "raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try:", "class ClientQuerys: \"\"\"Criando 
um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler()", "@classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper())", "finally: db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod @db_connector def delete(cls, connection,", "db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler()", "clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close()", "from src.database import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import Client class", "os clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally:", "as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close()", ".first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new user \"\"\"", "def delete(cls, connection, client_id: int) -> None: client = ( connection.session.query(Client) .filter_by(id=client_id) .first()", "return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with", "user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int) -> None: client =", "novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with 
DBConnectionHendler() as db_connection: try: client", "client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() )", "db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod", "\"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except:", "Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with", "try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def", ".filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new user", "except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as", "DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally:", "from typing import List from src.database import DBConnectionHendler from src.database.db_connection import db_connector from", "get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first()", "@classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client)", "client = Client(name=nome.upper()) 
db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls)", "db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close()", "client_id: int) -> None: client = ( connection.session.query(Client) .filter_by(id=client_id) .first() ) connection.session.delete(client) connection.session.commit()", "db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls, client_id):", "@classmethod @db_connector def delete(cls, connection, client_id: int) -> None: client = ( connection.session.query(Client)", "raise finally: db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod @db_connector def delete(cls,", "uma lista de todos os clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all()", "ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as", "<filename>src/database/querys/clients.py from typing import List from src.database import DBConnectionHendler from src.database.db_connection import db_connector", "db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id:", "db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista de todos os clients\"\"\"", "import List from src.database import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import", "try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: 
db_connection.session.close() \"\"\"", "finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista de todos os", "get_all(cls) -> List: \"\"\"Retorna uma lista de todos os clients\"\"\" with DBConnectionHendler() as", "as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_id(cls,", "a new user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int) -> None:", "-> List: \"\"\"Retorna uma lista de todos os clients\"\"\" with DBConnectionHendler() as db_connection:", "db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma", "except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod @db_connector", "( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a", "db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna", "DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def", "delete(cls, connection, client_id: int) -> None: client = ( connection.session.query(Client) .filter_by(id=client_id) .first() )", "DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando um", "\"\"\" Create a new user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int)", "def new(cls, nome): 
\"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client)", "except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista", "todos os clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback() raise", "from src.database.db_connection import db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando um novo", "src.database.db_connection import db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\"", "raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista de todos", "import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\"", "new user \"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int) -> None: client", "db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista de", "nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except:", "\"\"\" @classmethod @db_connector def delete(cls, connection, client_id: int) -> None: client = (", "connection, client_id: int) -> None: client = ( connection.session.query(Client) .filter_by(id=client_id) .first() ) connection.session.delete(client)", "List from src.database import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import Client", "db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def", "\"\"\"someting\"\"\" with 
DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback()", ") except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new user \"\"\" @classmethod", "de todos os clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except: db_connection.session.rollback()", "DBConnectionHendler() as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise", "with DBConnectionHendler() as db_connection: try: client = Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise", "as db_connection: try: return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally:", "db_connection.session.close() @classmethod def get_id(cls, client_id): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: return (", "import db_connector from src.database.models import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod", "db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create a new", "from src.database.models import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls,", "Client(name=nome.upper()) db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) -> List:", "um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try:", "= Client(name=nome.upper()) 
db_connection.session.add(client) db_connection.session.commit() except: db_connection.session.rollback() raise finally: db_connection.session.close() @classmethod def get_all(cls) ->", "cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection: try: client =", "\"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls, nome): \"\"\"someting\"\"\" with DBConnectionHendler() as db_connection:", "typing import List from src.database import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models", "@classmethod def get_all(cls) -> List: \"\"\"Retorna uma lista de todos os clients\"\"\" with", "lista de todos os clients\"\"\" with DBConnectionHendler() as db_connection: try: return db_connection.session.query(Client).all() except:", "\"\"\"Retorna uma lista de todos os clients\"\"\" with DBConnectionHendler() as db_connection: try: return", "src.database import DBConnectionHendler from src.database.db_connection import db_connector from src.database.models import Client class ClientQuerys:", "def get_all(cls) -> List: \"\"\"Retorna uma lista de todos os clients\"\"\" with DBConnectionHendler()", "src.database.models import Client class ClientQuerys: \"\"\"Criando um novo cliente\"\"\" @classmethod def new(cls, nome):", "return ( db_connection.session.query(Client) .filter_by(id=client_id) .first() ) except: db_connection.session.rollback() raise finally: db_connection.session.close() \"\"\" Create", "@db_connector def delete(cls, connection, client_id: int) -> None: client = ( connection.session.query(Client) .filter_by(id=client_id)" ]
[ "Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket):", "assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) <", "from ccxt_microservice.bucket import Bucket import pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5)", "import Bucket import pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket):", "< the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def", "10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def test_add(): pass def", "def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def test_add(): pass def test_wait():", "assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state()", "test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def test_add(): pass def test_wait(): pass", "< the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10", "test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 <", "< 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def test_add(): pass", "pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 <", "def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state() <", "1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5)", "5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert", "5) def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert", "ccxt_microservice.bucket 
import Bucket import pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def", "the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5 def test_add():", "< 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket):", "9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10) < 5", "@pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state()", "Bucket import pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert", "the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5", "return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def", "the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def", "4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() <", "def test_state(the_bucket): assert 4.99 < the_bucket.state() < 5 def test_push(the_bucket): the_bucket.push(5) assert 9.99", "import pytest @pytest.fixture def the_bucket(): return Bucket(10, 1, 5) def test_state(the_bucket): assert 4.99", "test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 <", "the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99 < the_bucket.timeToWait(10)", "def test_push(the_bucket): the_bucket.push(5) assert 9.99 < the_bucket.state() < 10 def test_timeToWait(the_bucket): assert 4.99" ]
[ "stepsizes for the parameter arrays. \"\"\" from astropy.io import fits import numpy as", "\"\"\" low_wave, low_spec = [], [] for i in range(len(spec)/px - 1): idx", "-N/2. * np.log(X2 / N) # Find highest likelihood values maximum = np.nanmax(lnL)", "= args.nights data_path = args.datapath model_path = args.modelpath out_path = args.outpath ext =", "vel / 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted =", "from scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize", "wave = wave[:,p_ind,:] # Determine size of arrays n_orders = spec.shape[0] n_frames =", "previously computed cross-correlation map and other base terms, for a given set of", "i in range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha,", "= %.1f' % (Vsys[vidx]) # Write lnL to fits file hdu2 = fits.PrimaryHDU(lnL)", "a given set of day-night contrast and peak phase offset values over a", "parameter grid is used, and the output can be multiple Gigabytes. Either run", "axes are (Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase)", "n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs):", "for a given temperature over a given wavelength range. \"\"\" c1 = 1.1911e-12", "lnL_term2 def submed(cmap): \"\"\" Subtracts the median along the velocity axis from the", "serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0)", "contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood", "'logVMR = %.1f' % (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print 'off", "- theta)) A_p = 1. 
- contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2", "= wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave)", "parser.parse_args() nights = args.nights data_path = args.datapath model_path = args.modelpath out_path = args.outpath", "3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model", "values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift", "output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength", "/= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2)", "axis from the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap -", "wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL", "cmap merr_osum += merr serr_osum += serr # Compute brightness variation for given", "0. 
cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) #", "merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given", "Last Modified: 2021-05-11 Description: Calculates the 6-D log likelihood map for a series", "range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid,", "# Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model", "np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate model term for log likelihood", "chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the chi squared", "maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] # Print highest", "5, '56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321': 6} # Specify", "# Equation: Ap = 1 - C * cos^2 (pi * (phi -", "= fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr, serr,", "sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function for a given temperature over", "lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data w_idx = (wave[0,:]", "= interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap", "%.1f' % (Vsys[vidx]) # Write lnL to fits file hdu2 = fits.PrimaryHDU(lnL) hdu2.writeto(out_path+'lnL_wasp33b_FeI%s'", "= hdu[0].data # Interpolate model to wavelength grid with consistent resolution m_wave =", "phase range. \"\"\" offset = offset_deg / 360. 
# Equation: Ap = 1", "numpy as np from scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel import", "spec - envelope return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a", "def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation map to planet's", "2020-10-28 Last Modified: 2021-05-11 Description: Calculates the 6-D log likelihood map for a", "butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with a given", "VMR for i in range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j],", "= chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. *", "sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum +=", "# Apply brightness variation to lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:]", "the log likelihood for each value of alpha for i,a in enumerate(alpha): X2_KTV", "highest likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx =", "orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift", "len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift merr and cmap to the", "atmospheric models cross-correlated with planetary emission spectra. Parameters are log VMR, day-night contrast,", "\"\"\" Author: <NAME> Created: 2020-10-28 Last Modified: 2021-05-11 Description: Calculates the 6-D log", "full parameter grid is used, and the output can be multiple Gigabytes. Either", "= a/b return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope", "= maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx =", "base terms, for a given set of scaled line contrast values. 
\"\"\" X2", "help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args = parser.parse_args()", "log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts", "other two terms of the log likelihood equation: the spectra squared, and the", "Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator", "be multiple Gigabytes. Either run the file on a server that can handle", "Create interpolator to put model onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf,", "in the given stepsize, then interpolating. \"\"\" low_wave, low_spec = [], [] for", "the brightness variation for a given set of day-night contrast and peak phase", "offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\",", "= np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5)", "vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5,", "cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap", "= interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec - envelope return", "= np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate model term", "\"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub def phasefold(Kps,", "remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put model", "0.1) alpha = 
np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150.,", "Gaussian1DKernel import argparse from scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt def", "enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma =", "np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders redward of 600", "with 1D Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv", "redward of 600 nm, and sum together for i,o in enumerate(np.arange(24,37)): # Calculate", "orbital velocity, and systemic velocity. NOTE: Because this computes the full likelihood map,", "offset = offset_deg / 360. # Equation: Ap = 1 - C *", "Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\",", "mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid,", "#,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a given spectral order,", "= butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res):", "time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift =", "and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma =", "scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the", "frame and creates the Kp-Vsys map. \"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap =", "1. 
- vel / 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave)", "return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median along the velocity", "serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a * term3_shift # Sum", "args = parser.parse_args() nights = args.nights data_path = args.datapath model_path = args.modelpath out_path", "return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation for a", "% (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys = %.1f' %", "given wavelength range. \"\"\" c1 = 1.1911e-12 c2 = 1.439 y = 1e4/wavelength", "file on a server that can handle this or reduce the ranges and/or", "0.025} for night in nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] -", "submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum += serr # Compute brightness", "np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys", "likelihood equation: the spectra squared, and the base model squared. \"\"\" cmap =", "serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders redward of 600 nm, and", "brightness variation for a given set of day-night contrast and peak phase offset", "variation to lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3", "= wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted", "Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) contrast = np.arange(0.,1.1, 0.1) lnL", "vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] =", "in nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. 
# (orders,", "frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate", "6-D log likelihood map for a series of atmospheric models cross-correlated with planetary", "* redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx]", "order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with a given order", "# Perform cross-correlation for orders redward of 600 nm, and sum together for", "offsets variation = brightvar(phase, offset, contrast) # Apply brightness variation to lnL terms", "Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps,", "given stepsize, then interpolating. \"\"\" low_wave, low_spec = [], [] for i in", "output can be multiple Gigabytes. Either run the file on a server that", "model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def", "# Create variables/arrays for lnL components N = 0. cmap_osum = np.zeros((n_frames, len(vgrid)))", "resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec", "out_path = args.outpath ext = args.extension # Define parameter arrays vmrs = np.arange(-5.,", "computationally expensive to run when the full parameter grid is used, and the", "= 1e4/wavelength a = c1*(y**5.) 
tmp = c2*y/temp b = np.exp(tmp) - 1.", "= {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250)", "<NAME> Created: 2020-10-28 Last Modified: 2021-05-11 Description: Calculates the 6-D log likelihood map", "i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift", "= vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:]", "for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\"", "maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) *", "(wavelength, spectrum) model = hdu[0].data # Interpolate model to wavelength grid with consistent", "_, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the log likelihood", "each MJD night iters = {'56550': 5, '56561': 4, '56904': 4, '56915': 6,", "#corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate model", "np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution", "\"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp", "then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day", "shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] =", "1.) 
contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) #", "= args.extension # Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha =", "data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy')", "* a * term3_shift # Sum the log likelihood in time X2[i] =", "log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift model", "log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg,", "low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec - envelope return corrected def", "p_ind = np.where((phase < 0.41) & (phase > -0.41))[0] phase = phase[p_ind] spec", "portion of the lnL from the previously computed cross-correlation map and other base", "log likelihood for each value of alpha for i,a in enumerate(alpha): X2_KTV =", "parameter arrays. \"\"\" from astropy.io import fits import numpy as np from scipy.interpolate", "axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma", "NOTE: Because this computes the full likelihood map, not MCMC chains, this file", "model = hdu[0].data # Interpolate model to wavelength grid with consistent resolution m_wave", "tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma", "shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0)", "%.1f' % (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print 'off = %.1f'", "# Specify Butterworth filter cut-off frequency for each night bfreq = {'56550': 0.035,", "< 0.0005) | np.isnan(sigma))] = 1e20 # Calculate number of data points in", "offset_deg / 360. # Equation: Ap = 1 - C * cos^2 (pi", "- C * cos^2 (pi * (phi - theta)) A_p = 1. 
-", "vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift = interp1d(vshift,", "data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\")", "argparse from scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\"", "X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift merr and", "of highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print 'C = %.1f'", "sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] =", "likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift model to", "Vsys) # Shift merr and cmap to the planet's velocity, so their axes", "night iters = {'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966': 6,", "data_path = args.datapath model_path = args.modelpath out_path = args.outpath ext = args.extension #", "\"\"\" Computes the brightness variation for a given set of day-night contrast and", "in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a", "High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\")", "Butterworth filter cut-off frequency for each night bfreq = {'56550': 0.035, '56561': 0.04,", "scaled line contrast, orbital velocity, and systemic velocity. 
NOTE: Because this computes the", "extension\") args = parser.parse_args() nights = args.nights data_path = args.datapath model_path = args.modelpath", "interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with 1D Gaussian", "value of alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 *", "wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for", "c2 = 1.439 y = 1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp b", "Because this computes the full likelihood map, not MCMC chains, this file is", "argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\",", "spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of arrays n_orders =", "* np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate model term for log", "the base model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 =", "vgrid, vsys, cmap, phase) # Calculate the log likelihood for each value of", "merr and cmap to the planet's velocity, so their axes are (Kp, time,", "remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope from a model spectrum by", "series of atmospheric models cross-correlated with planetary emission spectra. 
Parameters are log VMR,", "astropy.io import fits import numpy as np from scipy.interpolate import interp1d from astropy.convolution", "parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\",", "likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx])", "shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap,", "\"\"\" Calculates the cross-correlation map for a given spectral order, along with the", "given VMR for i in range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j],", "interp(wave) corrected = spec - envelope return corrected def butterworth(x, order, freq, filt_type='highpass'):", "'off = %.1f' % (offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print 'Kp", "corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with", "(pi * (phi - theta)) A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:]", "to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for", "include phases below 0.41 and above 0.59, to avoid stellar Fe signal p_ind", "the lnL from the previously computed cross-correlation map and other base terms, for", "<= maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /=", "parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\")", "computed cross-correlation map and other base terms, for a given set of scaled", "over a given phase range. \"\"\" offset = offset_deg / 360. 
# Equation:", "log likelihood equation: the spectra squared, and the base model squared. \"\"\" cmap", "phases below 0.41 and above 0.59, to avoid stellar Fe signal p_ind =", "(alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx])", "idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave)", "= correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum +=", "print 'off = %.1f' % (offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print", "default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\",", "= np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below 0.41 and above 0.59,", "term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the log likelihood for", "axis=0) return fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase):", "a given phase range. \"\"\" offset = offset_deg / 360. # Equation: Ap", "= np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness", "tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 # Calculate", "\"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift merr", "len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap =", "0.025, '56966': 0.055, '57321': 0.025} for night in nights: # Read in data", "parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args = parser.parse_args() nights = args.nights", "stepsize, then interpolating. 
\"\"\" low_wave, low_spec = [], [] for i in range(len(spec)/px", "= shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return", "Either run the file on a server that can handle this or reduce", "4, '56904': 4, '56915': 6, '56966': 6, '57321': 6} # Specify Butterworth filter", "phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below 0.41 and above", "and cut-off frequency, to the given model. \"\"\" butterfilt = butter(order, freq, btype=filt_type,", "# Only include phases below 0.41 and above 0.59, to avoid stellar Fe", "= np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data w_idx = (wave[0,:] >=", "spectrum by finding the minimum value in the given stepsize, then interpolating. \"\"\"", "= [], [] for i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px])", "brightness variation for given contrasts and offsets variation = brightvar(phase, offset, contrast) #", "cross-correlation map for a given spectral order, along with the other two terms", "= args.modelpath out_path = args.outpath ext = args.extension # Define parameter arrays vmrs", "minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum", "fits import numpy as np from scipy.interpolate import interp1d from astropy.convolution import convolve,", "args.nights data_path = args.datapath model_path = args.modelpath out_path = args.outpath ext = args.extension", "return fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\"", "1e20 # Calculate number of data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]),", "Find highest likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx", "brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation for a given set of", "return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array 
evenly spaced in resolution.", "serr_osum += serr # Compute brightness variation for given contrasts and offsets variation", "line contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys)", "| np.isnan(sigma))] = 1e20 # Calculate number of data points in spectra minwave,", "Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2 / N) # Find", "for i in range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1,", "Apply brightness variation to lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] *", "/= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 # Calculate number of", "Applies a high-pass Butterworth filter, with a given order and cut-off frequency, to", "np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) contrast =", "on a server that can handle this or reduce the ranges and/or stepsizes", "given order and cut-off frequency, to the given model. 
\"\"\" butterfilt = butter(order,", "very computationally expensive to run when the full parameter grid is used, and", "variation = brightvar(phase, offset, contrast) # Apply brightness variation to lnL terms lnL_term1", "contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify", "high-pass Butterworth filter, with a given order and cut-off frequency, to the given", "return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope from a", "models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name", "0.41) & (phase > -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave =", "highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print 'C = %.1f' %", "and above 0.59, to avoid stellar Fe signal p_ind = np.where((phase < 0.41)", "vel in enumerate(vgrid): # Shift model to desired velocity and scale redshift =", "is very computationally expensive to run when the full parameter grid is used,", "and/or stepsizes for the parameter arrays. 
\"\"\" from astropy.io import fits import numpy", "vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i]", "return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter,", "return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD", "Specify number of SYSREM iterations used on spectra for each MJD night iters", "a high-pass Butterworth filter, with a given order and cut-off frequency, to the", "oidx = maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] #", "= spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term for log", "that can handle this or reduce the ranges and/or stepsizes for the parameter", "(orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy')", "print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) #", "spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum", "expensive to run when the full parameter grid is used, and the output", "C * cos^2 (pi * (phi - theta)) A_p = 1. 
- contrast[:,np.newaxis,np.newaxis]", "'56966': 0.055, '57321': 0.025} for night in nights: # Read in data spec", "for each MJD night iters = {'56550': 5, '56561': 4, '56904': 4, '56915':", "phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase)", "fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr, serr, alpha,", "each value of alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2", "submed(cmap): \"\"\" Subtracts the median along the velocity axis from the cross-correlation map.", "wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma <", "= np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0, serr,", "dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model = hdu[0].data", "phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the log", "lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]", "phase) # Calculate the log likelihood for each value of alpha for i,a", "args.outpath ext = args.extension # Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1)", "phase): \"\"\" Shifts the cross-correlation map to planet's rest frame and creates the", "given contrasts and offsets variation = brightvar(phase, offset, contrast) # Apply brightness variation", "as np from scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse", "components N = 0. cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum", "vgrid, Vsys, phase) lnL[v,i,j] += -N/2. 
* np.log(X2 / N) # Find highest", "2 * a * term3_shift # Sum the log likelihood in time X2[i]", "variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for i in range(len(contrast)): for j", "- envelope return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass", "model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) #", "shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap,", "len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders redward of 600 nm,", "Subtracts the median along the velocity axis from the cross-correlation map. \"\"\" mdn", "# Sum the log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return X2", "import convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit from scipy.signal import butter,", "= np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005)", "len(vsys))) # (alpha, Kps, Vsys) # Shift merr and cmap to the planet's", "full likelihood map, not MCMC chains, this file is very computationally expensive to", "aidx = maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] # Print highest likelihood", "of the log likelihood equation: the spectra squared, and the base model squared.", "MCMC chains, this file is very computationally expensive to run when the full", "log likelihood map for a series of atmospheric models cross-correlated with planetary emission", "spectra squared, and the base model squared. 
\"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 =", "Calculate number of data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx,", "stdev[frame,w_idx] # Calculate data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i,", "lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median", "consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False)", "np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame", "to run when the full parameter grid is used, and the output can", "bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope from a model", "envelope = interp(wave) corrected = spec - envelope return corrected def butterworth(x, order,", "= phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap,", "of scaled line contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha,", "X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the", "filter cut-off frequency for each night bfreq = {'56550': 0.035, '56561': 0.04, '56904':", "this or reduce the ranges and/or stepsizes for the parameter arrays. 
\"\"\" from", "interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:]", "btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a", "* np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map", "from scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates", "default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args =", "planet's velocity, so their axes are (Kp, time, Vsys) _, term2_shift = phasefold(Kps,", "vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5)", "[] for i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp", "x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map", "a model spectrum by finding the minimum value in the given stepsize, then", "size of arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for", "Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1)", "a**2 * term2_shift - 2 * a * term3_shift # Sum the log", "frequency, to the given model. 
\"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered", "(wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec", "envelope return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth", "order, along with the other two terms of the log likelihood equation: the", "np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot", "low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec", "the velocity axis from the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub =", "'Location of highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print 'C =", "SYSREM iterations used on spectra for each MJD night iters = {'56550': 5,", "bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321':", "name extension\") args = parser.parse_args() nights = args.nights data_path = args.datapath model_path =", "fidx = maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx", "in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data", "parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\",", "phase offset values over a given phase range. 
\"\"\" offset = offset_deg /", "cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range", "maxes[5][0] # Print highest likelihood values print 'Location of highest likelihood:' print 'logVMR", "* len(phase) # Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid,", "alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the chi squared portion of the", "squared portion of the lnL from the previously computed cross-correlation map and other", "equation: the spectra squared, and the base model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid)))", "cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] =", "in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels) wave =", "# Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o],", "planet's rest frame and creates the Kp-Vsys map. 
\"\"\" fmap = np.empty((len(Kps), len(vsys)))", "# (alpha, Kps, Vsys) # Shift merr and cmap to the planet's velocity,", "in enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength,", "in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to planet's", "corr_bot # Calculate model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap,", "len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM iterations used on spectra", "np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:])", "np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap,", "Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1)", "corr_top #/ corr_bot # Calculate model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2)", "redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] #", "on spectra for each MJD night iters = {'56550': 5, '56561': 4, '56904':", "maxes[4][0] vidx = maxes[5][0] # Print highest likelihood values print 'Location of highest", "phase): \"\"\" Calculates the chi squared portion of the lnL from the previously", "chains, this file is very computationally expensive to run when the full parameter", "= maxes[4][0] vidx = maxes[5][0] # Print highest likelihood values print 'Location of", "2021-05-11 Description: Calculates the 6-D log likelihood map for a series of atmospheric", "bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with 1D Gaussian kernel, then filter", "= np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps):", "Planck function for a given temperature over a given wavelength 
range. \"\"\" c1", "merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders redward", "> -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine", "- contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser =", "of alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift", "= argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath',", "np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift model to desired velocity and", "used on spectra for each MJD night iters = {'56550': 5, '56561': 4,", "'--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args", "np.exp(tmp) - 1. bbsor = a/b return bbsor def remove_env(wave, spec, px): \"\"\"", "for night in nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1.", "nm, and sum together for i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent", "cut-off frequency for each night bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03,", "# Shift to planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift =", "px): \"\"\" Subtracts the lower envelope from a model spectrum by finding the", "360. 
# Equation: Ap = 1 - C * cos^2 (pi * (phi", "for frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] #", "range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate')", "likelihood for each value of alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis]", "= np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset", "with a given order and cut-off frequency, to the given model. \"\"\" butterfilt", "default=\".fits\", help=\"output file name extension\") args = parser.parse_args() nights = args.nights data_path =", "model onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create", "- np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term for log likelihood lnL_term1[frame]", "np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) #", "# Write lnL to fits file hdu2 = fits.PrimaryHDU(lnL) hdu2.writeto(out_path+'lnL_wasp33b_FeI%s' % (ext), overwrite=True)", "filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N", "* term2_shift - 2 * a * term3_shift # Sum the log likelihood", "N) # Find highest likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL ==", "w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for frame in range(len(spec)):", "= parser.parse_args() nights = args.nights data_path = args.datapath model_path = args.modelpath out_path =", "scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function for a", "Shifts the cross-correlation map to planet's rest 
frame and creates the Kp-Vsys map.", "a given order and cut-off frequency, to the given model. \"\"\" butterfilt =", "Equation: Ap = 1 - C * cos^2 (pi * (phi - theta))", "& (wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx])", "Only include phases below 0.41 and above 0.59, to avoid stellar Fe signal", "\"\"\" Subtracts the median along the velocity axis from the cross-correlation map. \"\"\"", "= model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation", "wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) #", "0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM", "and scale data w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for", "a = c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp) - 1. 
bbsor =", "axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation for", "'56966': 6, '57321': 6} # Specify Butterworth filter cut-off frequency for each night", "from astropy.io import fits import numpy as np from scipy.interpolate import interp1d from", "= convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) #", "wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a given spectral", "# Calculate lnL for given VMR for i in range(len(contrast)): for j in", "ext = args.extension # Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha", "{'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321': 6} #", "avoid stellar Fe signal p_ind = np.where((phase < 0.41) & (phase > -0.41))[0]", "= butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put model onto data's wavelength", "# Create interpolator to put model onto data's wavelength grid filt_interp = interp1d(m_wave,", "in range(len(contrast)): for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps,", "0.055, '57321': 0.025} for night in nights: # Read in data spec =", "minwave) & (wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx] -", "mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put model onto data's", "sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 # Calculate number of data points", "line contrast, orbital velocity, and systemic velocity. NOTE: Because this computes the full", "convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create", "minimum value in the given stepsize, then interpolating. 
\"\"\" low_wave, low_spec = [],", "def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the chi", "= 1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ###############################################################################", "frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') #", "= np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid,", ">= minwave) & (wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx]", "for given VMR for i in range(len(contrast)): for j in range(len(offset)): X2 =", "\"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered", "np.where(lnL == maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx", "# Isolate wavelength range and scale data w_idx = (wave[0,:] >= minwave) &", "run when the full parameter grid is used, and the output can be", "to desired velocity and scale redshift = 1. - vel / 3e5 shift_wave", "\"\"\" from astropy.io import fits import numpy as np from scipy.interpolate import interp1d", "the previously computed cross-correlation map and other base terms, for a given set", "np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset =", "X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2.", "creates the Kp-Vsys map. 
\"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap),", "%.1f' % (offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print 'Kp = %.1f'", "variation for given contrasts and offsets variation = brightvar(phase, offset, contrast) # Apply", "= np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation", "the other two terms of the log likelihood equation: the spectra squared, and", "number of data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx", "np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM iterations used on", "and sum together for i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties", "scale redshift = 1. - vel / 3e5 shift_wave = wave[0,w_idx] * redshift", "MJD night iters = {'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966':", "freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates", "a wavelength array evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength", "the cross-correlation map to planet's rest frame and creates the Kp-Vsys map. \"\"\"", "to lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 =", "len(Kps), len(Vsys))) # Specify number of SYSREM iterations used on spectra for each", "lnL[v,i,j] += -N/2. * np.log(X2 / N) # Find highest likelihood values maximum", "Sum the log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def", "= brightvar(phase, offset, contrast) # Apply brightness variation to lnL terms lnL_term1 =", "= np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. 
# (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders,", "sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly spaced", "# Convolve model with 1D Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48,", "reduce the ranges and/or stepsizes for the parameter arrays. \"\"\" from astropy.io import", "grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear',", "phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the log likelihood for each value", "convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt", "other base terms, for a given set of scaled line contrast values. \"\"\"", "'C = %.1f' % (contrast[cidx]) print 'off = %.1f' % (offset[oidx]) print 'a", "grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components", "and offsets variation = brightvar(phase, offset, contrast) # Apply brightness variation to lnL", "for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args = parser.parse_args() nights", "value in the given stepsize, then interpolating. 
\"\"\" low_wave, low_spec = [], []", "kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35))", "0.59, to avoid stellar Fe signal p_ind = np.where((phase < 0.41) & (phase", "brightness variation to lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2", "= np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only", "scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize import", "= phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the log likelihood for each", "a given temperature over a given wavelength range. \"\"\" c1 = 1.1911e-12 c2", "velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift =", "= offset_deg / 360. # Equation: Ap = 1 - C * cos^2", "np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for", "'56915': 6, '56966': 6, '57321': 6} # Specify Butterworth filter cut-off frequency for", "parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath',", "\"\"\" c=299792458. 
dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates", "of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to", "import numpy as np from scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel", "\"\"\" Calculates the Planck function for a given temperature over a given wavelength", "fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to planet's orbital velocity", "log VMR, day-night contrast, peak phase offset, scaled line contrast, orbital velocity, and", "kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with 1D Gaussian kernel,", "arrays. \"\"\" from astropy.io import fits import numpy as np from scipy.interpolate import", "= %.1f' % (contrast[cidx]) print 'off = %.1f' % (offset[oidx]) print 'a =", "arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr in", "VMR, day-night contrast, peak phase offset, scaled line contrast, orbital velocity, and systemic", "a/b return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope from", "= np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5)", "in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma", "scaled line contrast values. 
\"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps,", "when the full parameter grid is used, and the output can be multiple", "x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array", "lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median along the velocity axis from", "peak phase offset values over a given phase range. \"\"\" offset = offset_deg", "= 1.1911e-12 c2 = 1.439 y = 1e4/wavelength a = c1*(y**5.) tmp =", "4, '56915': 6, '56966': 6, '57321': 6} # Specify Butterworth filter cut-off frequency", "phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation map to planet's rest", "'Kp = %.1f' % (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) # Write", "merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum", "for i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o],", "filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum += serr", "# Interpolate model to wavelength grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1],", "term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid): #", "np.where((phase < 0.41) & (phase > -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:]", "serr # Compute brightness variation for given contrasts and offsets variation = brightvar(phase,", "map, not MCMC chains, this file is very computationally expensive to run when", "i, vel in enumerate(vgrid): # Shift model to desired velocity and scale redshift", "contrast) # Apply brightness variation to lnL terms lnL_term1 = serr_osum lnL_term2 =", "'56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for night in", "the chi squared portion of the lnL from the previously computed cross-correlation map", "the log likelihood equation: 
the spectra squared, and the base model squared. \"\"\"", "corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top", "6} # Specify Butterworth filter cut-off frequency for each night bfreq = {'56550':", "fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates", "len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in", "contrast, peak phase offset, scaled line contrast, orbital velocity, and systemic velocity. NOTE:", "the parameter arrays. \"\"\" from astropy.io import fits import numpy as np from", "rest frame and creates the Kp-Vsys map. \"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap", "merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the chi squared portion", "merr serr_osum += serr # Compute brightness variation for given contrasts and offsets", "wavelength range. \"\"\" c1 = 1.1911e-12 c2 = 1.439 y = 1e4/wavelength a", "cross-correlation for orders redward of 600 nm, and sum together for i,o in", "\"\"\" Calculates the chi squared portion of the lnL from the previously computed", "likelihood map for a series of atmospheric models cross-correlated with planetary emission spectra.", "np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term for log likelihood lnL_term1[frame] =", "= %.1f' % (offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print 'Kp =", "filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day =", "function for a given temperature over a given wavelength range. 
\"\"\" c1 =", "Author: <NAME> Created: 2020-10-28 Last Modified: 2021-05-11 Description: Calculates the 6-D log likelihood", "nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\",", "emission spectra. Parameters are log VMR, day-night contrast, peak phase offset, scaled line", "wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma,", "for orders redward of 600 nm, and sum together for i,o in enumerate(np.arange(24,37)):", "likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx = maxes[0][0]", "(phi - theta)) A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\", "likelihood values print 'Location of highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx])", "spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside model hdu", "vsys, cmap, phase) # Calculate the log likelihood for each value of alpha", "pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below 0.41 and", "data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]),", "Butterworth filter, with a given order and cut-off frequency, to the given model.", "given phase range. \"\"\" offset = offset_deg / 360. # Equation: Ap =", "= cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for i in", "squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate", "map to planet's rest frame and creates the Kp-Vsys map. 
\"\"\" fmap =", "1.5) Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60.,", "values print 'Location of highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print", "spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): # Get", "for i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp =", "np.isnan(sigma))] = 1e20 # Calculate number of data points in spectra minwave, maxwave", "cmap, phase): \"\"\" Shifts the cross-correlation map to planet's rest frame and creates", "vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation map to planet's rest frame", "and other base terms, for a given set of scaled line contrast values.", "stellar Fe signal p_ind = np.where((phase < 0.41) & (phase > -0.41))[0] phase", "Shift merr and cmap to the planet's velocity, so their axes are (Kp,", "A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p", "i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 *", "1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser", "for a given spectral order, along with the other two terms of the", "0.0005) | np.isnan(sigma))] = 1e20 # Calculate number of data points in spectra", "velocity and scale redshift = 1. - vel / 3e5 shift_wave = wave[0,w_idx]", "= np.exp(tmp) - 1. 
bbsor = a/b return bbsor def remove_env(wave, spec, px):", "* variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for i in range(len(contrast)): for", "minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation", "Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to", "wave[:,p_ind,:] # Determine size of arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix", "############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str)", "alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2 / N) #", "model to desired velocity and scale redshift = 1. - vel / 3e5", "Calculate model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2", "for given contrasts and offsets variation = brightvar(phase, offset, contrast) # Apply brightness", "# Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave,", "map. 
\"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i,", "vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum += merr", "brightvar(phase, offset, contrast) # Apply brightness variation to lnL terms lnL_term1 = serr_osum", "print 'C = %.1f' % (contrast[cidx]) print 'off = %.1f' % (offset[oidx]) print", "data w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for frame in", "args.extension # Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5,", "= np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected", "(offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx])", "len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma,", "Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels) wave", "Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i]", "np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx", "args.datapath model_path = args.modelpath out_path = args.outpath ext = args.extension # Define parameter", "values maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx = maxes[0][0] cidx", "day-night contrast and peak phase offset values over a given phase range. 
\"\"\"", "args.modelpath out_path = args.outpath ext = args.extension # Define parameter arrays vmrs =", "variation for a given set of day-night contrast and peak phase offset values", "= wv_interp(m_wave) # Convolve model with 1D Gaussian kernel, then filter FWHM_inst =", "variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for", "/ 360. # Equation: Ap = 1 - C * cos^2 (pi *", "n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside", "= mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted", "(orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below", "cmap[frame,i] = corr_top #/ corr_bot # Calculate model term for log likelihood lnL_term2[frame,i]", "\"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength", "lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2 / N)", "np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate model term for", "- 1. # (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels)", "parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\",", "1. 
bbsor = a/b return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the", "# Calculate data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel", "minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx])", "# Shift model to desired velocity and scale redshift = 1. - vel", "= interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N =", "spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term for log likelihood", "def remove_env(wave, spec, px): \"\"\" Subtracts the lower envelope from a model spectrum", "iterations used on spectra for each MJD night iters = {'56550': 5, '56561':", "Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model =", "+= serr # Compute brightness variation for given contrasts and offsets variation =", "lower envelope from a model spectrum by finding the minimum value in the", "low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec -", "range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] +=", "%.1f' % (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys = %.1f'", "# (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase =", "= 1e20 # Calculate number of data points in spectra minwave, maxwave =", "- 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope", "lnL for given VMR for i in range(len(contrast)): for j in range(len(offset)): X2", "+= len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform 
cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:],", "np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation", "= maxes[5][0] # Print highest likelihood values print 'Location of highest likelihood:' print", "map for a series of atmospheric models cross-correlated with planetary emission spectra. Parameters", "range. \"\"\" c1 = 1.1911e-12 c2 = 1.439 y = 1e4/wavelength a =", "the lower envelope from a model spectrum by finding the minimum value in", "squared, and the base model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec))", "(wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec =", "/ 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted", "0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for night", "from the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis]", "can handle this or reduce the ranges and/or stepsizes for the parameter arrays.", "in range(len(phase)): # Shift to planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame])", "= {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025}", "the output can be multiple Gigabytes. Either run the file on a server", "dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation", "signal p_ind = np.where((phase < 0.41) & (phase > -0.41))[0] phase = phase[p_ind]", "median along the velocity axis from the cross-correlation map. 
\"\"\" mdn = np.nanmedian(cmap,axis=1)", "for each value of alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] +", "a series of atmospheric models cross-correlated with planetary emission spectra. Parameters are log", "chi squared portion of the lnL from the previously computed cross-correlation map and", "= np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) contrast = np.arange(0.,1.1, 0.1) lnL =", "offset_deg, contrast): \"\"\" Computes the brightness variation for a given set of day-night", "def submed(cmap): \"\"\" Subtracts the median along the velocity axis from the cross-correlation", "len(vsys))) for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)):", "= np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/", "600 nm, and sum together for i,o in enumerate(np.arange(24,37)): # Calculate time- and", "np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median along the", "np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0, serr, merr =", "their axes are (Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr,", "# Calculate model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1,", "Kps, Vsys) # Shift merr and cmap to the planet's velocity, so their", "spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') #", "onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays", "wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with", "0.5) offset = np.arange(-30.,60., 1.) 
contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset),", "Calculate the log likelihood for each value of alpha for i,a in enumerate(alpha):", "phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of arrays n_orders", "likelihood map, not MCMC chains, this file is very computationally expensive to run", "likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast):", "= 0. cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames))", "finding the minimum value in the given stepsize, then interpolating. \"\"\" low_wave, low_spec", "mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N = 0. cmap_osum", "wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /=", "butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def", "for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift", "interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit from", "Calculate data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in", "1 - C * cos^2 (pi * (phi - theta)) A_p = 1.", "c=299792458. 
dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the", "data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for", "= %.1f' % (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print 'off =", "- mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the", "cross-correlated with planetary emission spectra. Parameters are log VMR, day-night contrast, peak phase", "along the velocity axis from the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub", "below 0.41 and above 0.59, to avoid stellar Fe signal p_ind = np.where((phase", "scale data w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for frame", "%.1f' % (contrast[cidx]) print 'off = %.1f' % (offset[oidx]) print 'a = %.1f'", "curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function", "% (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print 'off = %.1f' %", "'56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for night in nights: #", "spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' %", "default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\",", "'56915': 0.025, '56966': 0.055, '57321': 0.025} for night in nights: # Read in", "kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N = 0. 
cmap_osum =", "maxwave) for frame in range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx]", "cmap = submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum += serr #", "range(len(spec)): fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term", "= c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp) - 1. bbsor = a/b", "= np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) contrast", "X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation for a given", "axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))]", "vsys, cmap, phase): \"\"\" Shifts the cross-correlation map to planet's rest frame and", "array evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength", "frequency for each night bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915':", "Calculates the 6-D log likelihood map for a series of atmospheric models cross-correlated", "cidx = maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx", "maxes = np.where(lnL == maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx =", "(frames) # Only include phases below 0.41 and above 0.59, to avoid stellar", "interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N = 0.", "& (phase > -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:]", "fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap", "lnL from the previously computed cross-correlation map and other base terms, for a", "each night bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966':", "% (contrast[cidx]) 
print 'off = %.1f' % (offset[oidx]) print 'a = %.1f' %", "the file on a server that can handle this or reduce the ranges", "return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a given", "alpha for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift -", "offset, scaled line contrast, orbital velocity, and systemic velocity. NOTE: Because this computes", "and scale redshift = 1. - vel / 3e5 shift_wave = wave[0,w_idx] *", "= np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median along", "fixspec /= stdev[frame,w_idx] # Calculate data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2)", "Kps, vgrid, vsys, phase): \"\"\" Calculates the chi squared portion of the lnL", "wavelength grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1],", "in time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\"", "{'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf", "mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform", "frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below 0.41", "v,vmr in enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) #", "correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap", "phase offset, scaled line contrast, orbital velocity, and systemic velocity. 
NOTE: Because this", "= %.1f' % (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys =", "# Compute brightness variation for given contrasts and offsets variation = brightvar(phase, offset,", "Specify Butterworth filter cut-off frequency for each night bfreq = {'56550': 0.035, '56561':", "Parameters are log VMR, day-night contrast, peak phase offset, scaled line contrast, orbital", "output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file name extension\") args = parser.parse_args() nights =", "Modified: 2021-05-11 Description: Calculates the 6-D log likelihood map for a series of", "'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv,", "-2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys =", "% (Vsys[vidx]) # Write lnL to fits file hdu2 = fits.PrimaryHDU(lnL) hdu2.writeto(out_path+'lnL_wasp33b_FeI%s' %", "planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp", "night bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055,", "0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275.,", "help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path", "uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma)", "= np.zeros((n_frames)) # Perform cross-correlation for orders redward of 600 nm, and sum", "so their axes are (Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys,", "vgrid, vsys, phase): \"\"\" Calculates the chi squared portion of the lnL from", "'56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321': 6} # Specify Butterworth", "model_path = args.modelpath out_path = 
args.outpath ext = args.extension # Define parameter arrays", "# Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels)", "cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the median along the velocity axis", "1D Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv =", "= spec - envelope return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies", "fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in", "%.1f' % (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) # Write lnL to", "= wave[:,p_ind,:] # Determine size of arrays n_orders = spec.shape[0] n_frames = spec.shape[1]", "= corr_top #/ corr_bot # Calculate model term for log likelihood lnL_term2[frame,i] =", "maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx = maxes[0][0] cidx =", "day-night contrast, peak phase offset, scaled line contrast, orbital velocity, and systemic velocity.", "KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr,", "help=\"output file name extension\") args = parser.parse_args() nights = args.nights data_path = args.datapath", "for a given set of scaled line contrast values. \"\"\" X2 = np.zeros((len(alpha),", "chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2", "values over a given phase range. \"\"\" offset = offset_deg / 360. #", "vshift = vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys)", "term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys,", "set of scaled line contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) #", "merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate the", "theta)) A_p = 1. 
- contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return", "nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to", "len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders", "c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp) - 1. bbsor = a/b return", "given set of day-night contrast and peak phase offset values over a given", "interpolator to put model onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear',", "np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 #", "n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside model hdu =", "map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub def", "Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False)", "= np.where(lnL == maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0]", "the cross-correlation map for a given spectral order, along with the other two", "butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put model onto data's wavelength grid", "Calculates the cross-correlation map for a given spectral order, along with the other", "frame in range(len(phase)): # Shift to planet's orbital velocity vp = Kp *", "- 1. 
bbsor = a/b return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts", "= remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put", "Computes the brightness variation for a given set of day-night contrast and peak", "Calculates the Planck function for a given temperature over a given wavelength range.", "lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2 /", "are (Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _,", "4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf =", "low_wave, low_spec = [], [] for i in range(len(spec)/px - 1): idx =", "Gigabytes. Either run the file on a server that can handle this or", "= np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and", "systemic velocity. NOTE: Because this computes the full likelihood map, not MCMC chains,", "contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) #", "np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast),", "Isolate wavelength range and scale data w_idx = (wave[0,:] >= minwave) & (wave[0,:]", "time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase, offset_deg, contrast): \"\"\" Computes", "0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for night in nights:", "over a given wavelength range. \"\"\" c1 = 1.1911e-12 c2 = 1.439 y", "for j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys,", "the ranges and/or stepsizes for the parameter arrays. 
\"\"\" from astropy.io import fits", "bfreq[night]) # Create interpolator to put model onto data's wavelength grid filt_interp =", "# Define parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5.,", "term3_shift # Sum the log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return", "y = 1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp) -", "len(Vsys))) # Specify number of SYSREM iterations used on spectra for each MJD", "# (wavelength, spectrum) model = hdu[0].data # Interpolate model to wavelength grid with", "spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def", "the median along the velocity axis from the cross-correlation map. \"\"\" mdn =", "np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap", "0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for night in nights: # Read", "= spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of arrays n_orders = spec.shape[0]", "for i,a in enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2", "import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function for a given", "the Planck function for a given temperature over a given wavelength range. \"\"\"", "0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) 
contrast = np.arange(0.,1.1, 0.1)", "(vmr)) # (wavelength, spectrum) model = hdu[0].data # Interpolate model to wavelength grid", "the full parameter grid is used, and the output can be multiple Gigabytes.", "/ N) # Find highest likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL", "term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap):", "lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data", "kidx = maxes[4][0] vidx = maxes[5][0] # Print highest likelihood values print 'Location", "model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve", "Created: 2020-10-28 Last Modified: 2021-05-11 Description: Calculates the 6-D log likelihood map for", "= (wave[0,:] >= minwave) & (wave[0,:] <= maxwave) for frame in range(len(spec)): fixspec", "this computes the full likelihood map, not MCMC chains, this file is very", "of the lnL from the previously computed cross-correlation map and other base terms,", "given set of scaled line contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps), len(vsys)))", "np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of", "terms, for a given set of scaled line contrast values. \"\"\" X2 =", "by finding the minimum value in the given stepsize, then interpolating. \"\"\" low_wave,", "* np.log(X2 / N) # Find highest likelihood values maximum = np.nanmax(lnL) maxes", "interpolating. \"\"\" low_wave, low_spec = [], [] for i in range(len(spec)/px - 1):", "< 0.41) & (phase > -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave", "vidx = maxes[5][0] # Print highest likelihood values print 'Location of highest likelihood:'", "Shift model to desired velocity and scale redshift = 1. 
- vel /", "Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec,", "the minimum value in the given stepsize, then interpolating. \"\"\" low_wave, low_spec =", "run the file on a server that can handle this or reduce the", "ranges and/or stepsizes for the parameter arrays. \"\"\" from astropy.io import fits import", "the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return", "a given set of scaled line contrast values. \"\"\" X2 = np.zeros((len(alpha), len(Kps),", "for v,vmr in enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr))", "map for a given spectral order, along with the other two terms of", "along with the other two terms of the log likelihood equation: the spectra", "and cmap to the planet's velocity, so their axes are (Kp, time, Vsys)", "grid is used, and the output can be multiple Gigabytes. Either run the", "fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps,", "len(phase) # Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave,", "np.log(X2 / N) # Find highest likelihood values maximum = np.nanmax(lnL) maxes =", "given model. \"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x)", "then interpolating. \"\"\" low_wave, low_spec = [], [] for i in range(len(spec)/px -", "Description: Calculates the 6-D log likelihood map for a series of atmospheric models", "# Calculate the log likelihood for each value of alpha for i,a in", "= maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] # Print highest likelihood values", "used, and the output can be multiple Gigabytes. 
Either run the file on", "def planck(wavelength,temp): \"\"\" Calculates the Planck function for a given temperature over a", "cmap to the planet's velocity, so their axes are (Kp, time, Vsys) _,", "* \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping", "mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted *", "two terms of the log likelihood equation: the spectra squared, and the base", "resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\"", "= np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift merr and cmap", "can be multiple Gigabytes. Either run the file on a server that can", "Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path", "for lnL components N = 0. cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames,", "i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0)", "def brightvar(phase, offset_deg, contrast): \"\"\" Computes the brightness variation for a given set", "= cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\"", "Shift to planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid", "enumerate(vgrid): # Shift model to desired velocity and scale redshift = 1. 
-", "set of day-night contrast and peak phase offset values over a given phase", "merr_osum += merr serr_osum += serr # Compute brightness variation for given contrasts", "x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly spaced in resolution. \"\"\"", "= np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number", "= serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] #", "j in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase)", "model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model = hdu[0].data #", "(phase > -0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] #", "== maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx =", "cmap, phase) # Calculate the log likelihood for each value of alpha for", "'a = %.1f' % (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print 'Vsys", "#mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to", "- vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map", "{'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025, '56966': 0.055, '57321': 0.025} for", "of data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx =", "lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL", "and the base model squared. 
\"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2", "for i, vel in enumerate(vgrid): # Shift model to desired velocity and scale", "= 1. - vel / 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted =", "range and scale data w_idx = (wave[0,:] >= minwave) & (wave[0,:] <= maxwave)", "lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift model to desired", "given temperature over a given wavelength range. \"\"\" c1 = 1.1911e-12 c2 =", "the given model. \"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt,", "interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec - envelope return corrected", "from the previously computed cross-correlation map and other base terms, for a given", "from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function for", "# Print highest likelihood values print 'Location of highest likelihood:' print 'logVMR =", "of arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr", "\"\"\" Shifts the cross-correlation map to planet's rest frame and creates the Kp-Vsys", "'57321': 0.025} for night in nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1]", "range. \"\"\" offset = offset_deg / 360. 
# Equation: Ap = 1 -", "maxwave, filt_interp) cmap = submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum +=", "to the planet's velocity, so their axes are (Kp, time, Vsys) _, term2_shift", "Subtracts the lower envelope from a model spectrum by finding the minimum value", "to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output file", "= args.outpath ext = args.extension # Define parameter arrays vmrs = np.arange(-5., -2.1,", "are log VMR, day-night contrast, peak phase offset, scaled line contrast, orbital velocity,", "fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with 1D Gaussian kernel, then", "for a given set of day-night contrast and peak phase offset values over", "parameter arrays vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid", "np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected =", "\"\"\" Applies a high-pass Butterworth filter, with a given order and cut-off frequency,", "redshift = 1. - vel / 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted", "150., 0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.) 
contrast = np.arange(0.,1.1,", "points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:])", "lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM iterations", "= fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model = hdu[0].data # Interpolate model", "def butterworth(x, order, freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with a", "/= stdev[frame,w_idx] # Calculate data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for", "m_spec = wv_interp(m_wave) # Convolve model with 1D Gaussian kernel, then filter FWHM_inst", "np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0, serr, merr", "data term for log likelihood lnL_term1[frame] = np.nansum(fixspec**2) for i, vel in enumerate(vgrid):", "map and other base terms, for a given set of scaled line contrast", "to planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid -", "c2*y/temp b = np.exp(tmp) - 1. bbsor = a/b return bbsor def remove_env(wave,", "offset, contrast) # Apply brightness variation to lnL terms lnL_term1 = serr_osum lnL_term2", "a given wavelength range. \"\"\" c1 = 1.1911e-12 c2 = 1.439 y =", "the Kp-Vsys map. \"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys)))", "print 'Vsys = %.1f' % (Vsys[vidx]) # Write lnL to fits file hdu2", "b = np.exp(tmp) - 1. 
bbsor = a/b return bbsor def remove_env(wave, spec,", "np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot", "maximum) fidx = maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0]", "maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0,", "= maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx =", "for a series of atmospheric models cross-correlated with planetary emission spectra. Parameters are", "wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res)", "% (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) # Write lnL to fits", "= Kp * np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift = interp1d(vshift, cmap[frame,:],", "0.41 and above 0.59, to avoid stellar Fe signal p_ind = np.where((phase <", "for each night bfreq = {'56550': 0.035, '56561': 0.04, '56904': 0.03, '56915': 0.025,", "\\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\",", "= np.where((phase < 0.41) & (phase > -0.41))[0] phase = phase[p_ind] spec =", "the spectra squared, and the base model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1", "cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform", "base model squared. \"\"\" cmap = np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid)))", "Calculates the chi squared portion of the lnL from the previously computed cross-correlation", "to planet's rest frame and creates the Kp-Vsys map. 
\"\"\" fmap = np.empty((len(Kps),", "wv_interp(m_wave) # Convolve model with 1D Gaussian kernel, then filter FWHM_inst = {'CFHT':", "model spectrum by finding the minimum value in the given stepsize, then interpolating.", "fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model = hdu[0].data # Interpolate model to", "X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a * term3_shift", "mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation", "# Specify number of SYSREM iterations used on spectra for each MJD night", "enumerate(alpha): X2_KTV = serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a *", "spectra. Parameters are log VMR, day-night contrast, peak phase offset, scaled line contrast,", "Creates a wavelength array evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x)", "contrast): \"\"\" Computes the brightness variation for a given set of day-night contrast", "* (phi - theta)) A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] -", "the full likelihood map, not MCMC chains, this file is very computationally expensive", "spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N +=", "print 'Location of highest likelihood:' print 'logVMR = %.1f' % (vmrs[fidx]) print 'C", "vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) #", "term2_shift - 2 * a * term3_shift # Sum the log likelihood in", "= np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase)", "tmp = c2*y/temp b = np.exp(tmp) - 1. 
bbsor = a/b return bbsor", "fill_value=0.,bounds_error=False) # Create variables/arrays for lnL components N = 0. cmap_osum = np.zeros((n_frames,", "5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps =", "= c2*y/temp b = np.exp(tmp) - 1. bbsor = a/b return bbsor def", "model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top", "= np.arange(-30.,60., 1.) contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps),", "low_spec = [], [] for i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px])", "handle this or reduce the ranges and/or stepsizes for the parameter arrays. \"\"\"", "cos^2 (pi * (phi - theta)) A_p = 1. - contrast[:,np.newaxis,np.newaxis] * \\", "# Determine size of arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix =", "in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N", "in enumerate(vgrid): # Shift model to desired velocity and scale redshift = 1.", "file name extension\") args = parser.parse_args() nights = args.nights data_path = args.datapath model_path", "a server that can handle this or reduce the ranges and/or stepsizes for", "= np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for", "highest likelihood values print 'Location of highest likelihood:' print 'logVMR = %.1f' %", "* fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot #", "Perform cross-correlation for orders redward of 600 nm, and sum together for i,o", "with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0,", 
"correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a given spectral order, along with", "1. # (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase", "phase = phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of", "desired velocity and scale redshift = 1. - vel / 3e5 shift_wave =", "= np.nanmax(lnL) maxes = np.where(lnL == maximum) fidx = maxes[0][0] cidx = maxes[1][0]", "import argparse from scipy.optimize import curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp):", "lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for i", "to the given model. \"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered =", "= serr[np.newaxis,:,np.newaxis] + a**2 * term2_shift - 2 * a * term3_shift #", "= spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits'", "temperature over a given wavelength range. \"\"\" c1 = 1.1911e-12 c2 = 1.439", "np.sin(2.*np.pi*phase[frame]) vshift = vgrid - vp shift = interp1d(vshift, cmap[frame,:], bounds_error=False) shifted_map =", "from astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit from scipy.signal", "6, '57321': 6} # Specify Butterworth filter cut-off frequency for each night bfreq", "filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with a given order and cut-off", "computes the full likelihood map, not MCMC chains, this file is very computationally", "[], [] for i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px])", "to avoid stellar Fe signal p_ind = np.where((phase < 0.41) & (phase >", "the given stepsize, then interpolating. 
\"\"\" low_wave, low_spec = [], [] for i", "np.nanmax(wave[o,:,:]) minwidx, maxwidx = np.nanargmin(wave[o,0,:]), np.nanargmax(wave[o,0,:]) N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform", "= shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap, axis=0) return fmap, KTVmap def", "1.1911e-12 c2 = 1.439 y = 1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp", "= phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of arrays", "1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp) - 1. bbsor", "phase) lnL[v,i,j] += -N/2. * np.log(X2 / N) # Find highest likelihood values", "cut-off frequency, to the given model. \"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos')", "pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames)", "lnL components N = 0. cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid)))", "contrasts and offsets variation = brightvar(phase, offset, contrast) # Apply brightness variation to", "i in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave,", "terms of the log likelihood equation: the spectra squared, and the base model", "'57321': 6} # Specify Butterworth filter cut-off frequency for each night bfreq =", "hdu[0].data # Interpolate model to wavelength grid with consistent resolution m_wave = wavegrid(model[0,0],", "= np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /=", "Interpolate model to wavelength grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5)", "serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the chi squared portion of", "spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size of arrays n_orders = spec.shape[0] n_frames", "#waveno=1e4/wavelength return wavelength 
#,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a", "butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck function for a given temperature", "# (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include phases", "= 1.439 y = 1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp b =", "model with 1D Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8}", "Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp)", "np.nanstd(spec[o], axis=1) sigma = np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) |", "def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp): \"\"\" Calculates the cross-correlation map for a given spectral order, along", "fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2)) cmap[frame,i] = corr_top #/ corr_bot # Calculate", "\\ np.cos(np.pi*(phase[np.newaxis,np.newaxis,:] - \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of", "offset values over a given phase range. 
\"\"\" offset = offset_deg / 360.", "1, bfreq[night]) # Create interpolator to put model onto data's wavelength grid filt_interp", "A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\") parser.add_argument(\"-nights\", nargs=\"*\", help=\"MJD nights\",", "Convolve model with 1D Gaussian kernel, then filter FWHM_inst = {'CFHT': 4.48, 'Subaru':", "'Vsys = %.1f' % (Vsys[vidx]) # Write lnL to fits file hdu2 =", "freq, filt_type='highpass'): \"\"\" Applies a high-pass Butterworth filter, with a given order and", "1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1,", "sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation map to", "of 600 nm, and sum together for i,o in enumerate(np.arange(24,37)): # Calculate time-", "planetary emission spectra. Parameters are log VMR, day-night contrast, peak phase offset, scaled", "1.439 y = 1e4/wavelength a = c1*(y**5.) tmp = c2*y/temp b = np.exp(tmp)", "velocity axis from the cross-correlation map. \"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap", "mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv, 250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night])", "to put model onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False)", "Compute brightness variation for given contrasts and offsets variation = brightvar(phase, offset, contrast)", "for the parameter arrays. 
\"\"\" from astropy.io import fits import numpy as np", "# Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) * np.nansum(fixspec**2))", "likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return cmap, lnL_term1, lnL_term2 def submed(cmap): \"\"\" Subtracts the", "* term3_shift # Sum the log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1)", "enumerate(vmrs): # Get dayside model hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum)", "a given spectral order, along with the other two terms of the log", "\"\"\" Subtracts the lower envelope from a model spectrum by finding the minimum", "Fe signal p_ind = np.where((phase < 0.41) & (phase > -0.41))[0] phase =", "+= cmap merr_osum += merr serr_osum += serr # Compute brightness variation for", "len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM iterations used on spectra for", "velocity. NOTE: Because this computes the full likelihood map, not MCMC chains, this", "for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): #", "given spectral order, along with the other two terms of the log likelihood", "vsys, phase): \"\"\" Calculates the chi squared portion of the lnL from the", "np.nansum(fullmap, axis=0) return fmap, KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys,", "m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp = interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec =", "- vel / 3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted", "spectrum) model = hdu[0].data # Interpolate model to wavelength grid with consistent resolution", "= submed(cmap0) cmap_osum += cmap merr_osum += merr serr_osum += serr # Compute", "= sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly", 
"mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top =", "shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted - np.nanmean(mspec_shifted)", "wavelength range and scale data w_idx = (wave[0,:] >= minwave) & (wave[0,:] <=", "6, '56966': 6, '57321': 6} # Specify Butterworth filter cut-off frequency for each", "c1 = 1.1911e-12 c2 = 1.439 y = 1e4/wavelength a = c1*(y**5.) tmp", "- 2 * a * term3_shift # Sum the log likelihood in time", "= spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): # Get dayside model", "with the other two terms of the log likelihood equation: the spectra squared,", "variables/arrays for lnL components N = 0. cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum =", "np.zeros((len(alpha), len(Kps), len(vsys))) # (alpha, Kps, Vsys) # Shift merr and cmap to", "np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data w_idx = (wave[0,:] >= minwave)", "np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150., 0.5) Kps", "model. \"\"\" butterfilt = butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return", "vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid, vsys, cmap, phase) # Calculate", "\"\"\" Creates a wavelength array evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx)", "order and cut-off frequency, to the given model. \"\"\" butterfilt = butter(order, freq,", "the 6-D log likelihood map for a series of atmospheric models cross-correlated with", "def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly spaced in resolution. \"\"\" c=299792458.", "N = 0. 
cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum = np.zeros((n_frames, len(vgrid))) serr_osum =", "arrays vmrs = np.arange(-5., -2.1, 0.1) alpha = np.arange(0.5, 5., 0.1) vgrid =", "and peak phase offset values over a given phase range. \"\"\" offset =", "model to wavelength grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp", "= np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys,", "-0.41))[0] phase = phase[p_ind] spec = spec[:,p_ind,:] wave = wave[:,p_ind,:] # Determine size", "N += len(wave[o,0,minwidx:maxwidx]) * len(phase) # Perform cross-correlation cmap0, serr, merr = correlate(wave[o,:,:],", "iters = {'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321':", "and systemic velocity. NOTE: Because this computes the full likelihood map, not MCMC", "corrected = spec - envelope return corrected def butterworth(x, order, freq, filt_type='highpass'): \"\"\"", "+ a**2 * term2_shift - 2 * a * term3_shift # Sum the", "contrast, orbital velocity, and systemic velocity. NOTE: Because this computes the full likelihood", "envelope from a model spectrum by finding the minimum value in the given", "with planetary emission spectra. Parameters are log VMR, day-night contrast, peak phase offset,", "= np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to planet's orbital velocity vp", "# Find highest likelihood values maximum = np.nanmax(lnL) maxes = np.where(lnL == maximum)", "x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\" Creates a wavelength array evenly spaced in", "and creates the Kp-Vsys map. \"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps),", "file is very computationally expensive to run when the full parameter grid is", "offset = np.arange(-30.,60., 1.) 
contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha),", "bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] = np.nansum(fullmap,", "of SYSREM iterations used on spectra for each MJD night iters = {'56550':", "bbsor = a/b return bbsor def remove_env(wave, spec, px): \"\"\" Subtracts the lower", "+= merr serr_osum += serr # Compute brightness variation for given contrasts and", "cmap[frame,:], bounds_error=False) shifted_map = shift(vsys) fullmap[frame,:] = shifted_map KTVmap[i] = fullmap fmap[i,:] =", "multiple Gigabytes. Either run the file on a server that can handle this", "time- and wavelength-dependent uncertainties tsigma = np.nanstd(spec[o], axis=0) wsigma = np.nanstd(spec[o], axis=1) sigma", "nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames,", "- np.nanmean(mspec_shifted) mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec)", "% (vmr)) # (wavelength, spectrum) model = hdu[0].data # Interpolate model to wavelength", "Kp-Vsys map. \"\"\" fmap = np.empty((len(Kps), len(vsys))) KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for", "= spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2] for v,vmr in enumerate(vmrs): #", "cross-correlation map to planet's rest frame and creates the Kp-Vsys map. \"\"\" fmap", "\"\"\" offset = offset_deg / 360. # Equation: Ap = 1 - C", "hdu = fits.open(model_path+'model_wasp33b_FeI_logvmr%.1f.fits' % (vmr)) # (wavelength, spectrum) model = hdu[0].data # Interpolate", "sub = cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap, phase):", "contrast and peak phase offset values over a given phase range. 
\"\"\" offset", "(alpha, Kps, Vsys) # Shift merr and cmap to the planet's velocity, so", "250) mspec_bf = butterworth(mspec_conv, 1, bfreq[night]) # Create interpolator to put model onto", "stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot = np.sqrt(np.nansum(mspec_weighted**2) *", "= args.datapath model_path = args.modelpath out_path = args.outpath ext = args.extension # Define", "Vsys = np.arange(-150., 150., 0.5) Kps = np.arange(175.,275., 0.5) offset = np.arange(-30.,60., 1.)", "the planet's velocity, so their axes are (Kp, time, Vsys) _, term2_shift =", "\"\"\" c1 = 1.1911e-12 c2 = 1.439 y = 1e4/wavelength a = c1*(y**5.)", "fixspec = spec[frame,w_idx] - np.nanmean(spec[frame,w_idx]) fixspec /= stdev[frame,w_idx] # Calculate data term for", "Vsys, phase) lnL[v,i,j] += -N/2. * np.log(X2 / N) # Find highest likelihood", "1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope =", "serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate", "above 0.59, to avoid stellar Fe signal p_ind = np.where((phase < 0.41) &", "peak phase offset, scaled line contrast, orbital velocity, and systemic velocity. 
NOTE: Because", "alpha = np.arange(0.5, 5., 0.1) vgrid = np.arange(-600.,601.5, 1.5) Vsys = np.arange(-150., 150.,", "fill_value='extrapolate') envelope = interp(wave) corrected = spec - envelope return corrected def butterworth(x,", "cmap - mdn[:,np.newaxis] return sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts", "the log likelihood in time X2[i] = np.nansum(X2_KTV, axis=1) return X2 def brightvar(phase,", "Determine size of arrays n_orders = spec.shape[0] n_frames = spec.shape[1] n_pix = spec.shape[2]", "= np.zeros((n_frames, len(vgrid))) serr_osum = np.zeros((n_frames)) # Perform cross-correlation for orders redward of", "of atmospheric models cross-correlated with planetary emission spectra. Parameters are log VMR, day-night", "import interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit", "'--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension',", "# (frames) # Only include phases below 0.41 and above 0.59, to avoid", "is used, and the output can be multiple Gigabytes. Either run the file", "from a model spectrum by finding the minimum value in the given stepsize,", "this file is very computationally expensive to run when the full parameter grid", "lnL terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:]", "= np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data w_idx", "np.arange(-30.,60., 1.) contrast = np.arange(0.,1.1, 0.1) lnL = np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys)))", "night in nights: # Read in data spec = np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. 
#", "= np.zeros((len(vmrs),len(contrast), len(offset), len(alpha), len(Kps), len(Vsys))) # Specify number of SYSREM iterations used", "= np.outer(wsigma, tsigma) sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20", "terms lnL_term1 = serr_osum lnL_term2 = merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] *", "np.empty((len(spec),len(vgrid))) lnL_term1 = np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale", "cmap0, serr, merr = correlate(wave[o,:,:], spec[o,:,:], sigma, vgrid, minwave, maxwave, filt_interp) cmap =", "butter(order, freq, btype=filt_type, output='sos') x_filtered = sosfiltfilt(butterfilt, x) return x_filtered def wavegrid(wavemin,wavemax,res): \"\"\"", "help=\"MJD nights\", type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path", "put model onto data's wavelength grid filt_interp = interp1d(m_wave, mspec_bf, kind='linear', fill_value=0.,bounds_error=False) #", "astropy.convolution import convolve, Gaussian1DKernel import argparse from scipy.optimize import curve_fit from scipy.signal import", "orders redward of 600 nm, and sum together for i,o in enumerate(np.arange(24,37)): #", "FWHM_inst = {'CFHT': 4.48, 'Subaru': 1.8} mspec_conv = convolve(m_spec, Gaussian1DKernel(stddev=FWHM_inst['CFHT']/2.35)) #mspec_day = remove_env(m_wave,mspec_conv,", "= interp1d(model[0],model[1], kind='linear', fill_value=0, bounds_error=False) m_spec = wv_interp(m_wave) # Convolve model with 1D", "help=\"path to models\") parser.add_argument(\"-o\", '--outpath', default=\"./\", help=\"path for output\") parser.add_argument(\"-ext\", '--extension', default=\".fits\", help=\"output", "= interp(wave) corrected = spec - envelope return corrected def butterworth(x, order, freq,", "Ap = 1 - C * cos^2 (pi * (phi - theta)) A_p", 
"enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to planet's orbital", "evenly spaced in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno", "nights = args.nights data_path = args.datapath model_path = args.modelpath out_path = args.outpath ext", "np.empty((len(cmap),len(vsys))) for frame in range(len(phase)): # Shift to planet's orbital velocity vp =", "sigma /= np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 # Calculate number", "= np.nansum(fixspec**2) for i, vel in enumerate(vgrid): # Shift model to desired velocity", "# Calculate number of data points in spectra minwave, maxwave = np.nanmin(wave[o,:,:]), np.nanmax(wave[o,:,:])", "planck(wavelength,temp): \"\"\" Calculates the Planck function for a given temperature over a given", "to wavelength grid with consistent resolution m_wave = wavegrid(model[0,0], model[0,-1], 3e5) wv_interp =", "in range(len(spec)/px - 1): idx = np.nanargmin(spec[i*px:(i+1)*px]) low_spec.append(spec[idx+i*px]) low_wave.append(wave[idx+i*px]) interp = interp1d(low_wave, low_spec,", "- \\ offset[np.newaxis,:,np.newaxis]))**2 return A_p ############################################################################### parser = argparse.ArgumentParser(description=\"Likelihood Mapping of High-resolution Spectra\")", "(contrast[cidx]) print 'off = %.1f' % (offset[oidx]) print 'a = %.1f' % (alpha[aidx])", "= merr_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for", "cmap_osum += cmap merr_osum += merr serr_osum += serr # Compute brightness variation", "maxes[0][0] cidx = maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0]", "Create variables/arrays for lnL components N = 0. 
cmap_osum = np.zeros((n_frames, len(vgrid))) merr_osum", "'--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\") parser.add_argument(\"-o\", '--outpath',", "= maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] # Print", "interp = interp1d(low_wave, low_spec, fill_value='extrapolate') envelope = interp(wave) corrected = spec - envelope", "KTVmap def chi2(cmap, merr, serr, alpha, Kps, vgrid, vsys, phase): \"\"\" Calculates the", "np.nanstd(spec[o,:,:]) sigma[((sigma < 0.0005) | np.isnan(sigma))] = 1e20 # Calculate number of data", "(Vsys[vidx]) # Write lnL to fits file hdu2 = fits.PrimaryHDU(lnL) hdu2.writeto(out_path+'lnL_wasp33b_FeI%s' % (ext),", "'--extension', default=\".fits\", help=\"output file name extension\") args = parser.parse_args() nights = args.nights data_path", "#/ corr_bot # Calculate model term for log likelihood lnL_term2[frame,i] = np.nansum(mspec_weighted**2) return", "in range(len(offset)): X2 = chi2(lnL_term3[i,j], lnL_term2[i,j], lnL_term1, alpha, Kps, vgrid, Vsys, phase) lnL[v,i,j]", "(Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) # Write lnL to fits file", "velocity, and systemic velocity. NOTE: Because this computes the full likelihood map, not", "and the output can be multiple Gigabytes. Either run the file on a", "np.empty(len(spec)) lnL_term2 = np.empty((len(spec),len(vgrid))) # Isolate wavelength range and scale data w_idx =", "velocity, so their axes are (Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid,", "not MCMC chains, this file is very computationally expensive to run when the", "= %.1f' % (Kps[kidx]) print 'Vsys = %.1f' % (Vsys[vidx]) # Write lnL", "_, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift = phasefold(Kps, vgrid,", "cross-correlation map. 
\"\"\" mdn = np.nanmedian(cmap,axis=1) sub = cmap - mdn[:,np.newaxis] return sub", "Calculate lnL for given VMR for i in range(len(contrast)): for j in range(len(offset)):", "import fits import numpy as np from scipy.interpolate import interp1d from astropy.convolution import", "np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) # Only include", "in resolution. \"\"\" c=299792458. dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return wavelength #,waveno def correlate(wave,spec,stdev,vgrid,minwave,maxwave,model_interp):", "'56904': 4, '56915': 6, '56966': 6, '57321': 6} # Specify Butterworth filter cut-off", "cross-correlation map and other base terms, for a given set of scaled line", "filter, with a given order and cut-off frequency, to the given model. \"\"\"", "KTVmap = np.zeros((len(Kps), len(cmap), len(vsys))) for i, Kp in enumerate(Kps): fullmap = np.empty((len(cmap),len(vsys)))", "spectral order, along with the other two terms of the log likelihood equation:", "of day-night contrast and peak phase offset values over a given phase range.", "number of SYSREM iterations used on spectra for each MJD night iters =", "# Shift merr and cmap to the planet's velocity, so their axes are", "print 'logVMR = %.1f' % (vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print", "mspec_weighted /= stdev[frame,w_idx] # Perform cross-correlation corr_top = np.nansum(mspec_weighted * fixspec) #corr_bot =", "maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0] # Print highest likelihood values print", "Print highest likelihood values print 'Location of highest likelihood:' print 'logVMR = %.1f'", "or reduce the ranges and/or stepsizes for the parameter arrays. \"\"\" from astropy.io", "wavelength array evenly spaced in resolution. \"\"\" c=299792458. 
dx=np.log(1.+1./res) x=np.arange(np.log(wavemin),np.log(wavemax),dx) wavelength=np.exp(x) #waveno=1e4/wavelength return", "= 1 - C * cos^2 (pi * (phi - theta)) A_p =", "import curve_fit from scipy.signal import butter, sosfiltfilt def planck(wavelength,temp): \"\"\" Calculates the Planck", "models cross-correlated with planetary emission spectra. Parameters are log VMR, day-night contrast, peak", "np.load(data_path+night+'_spectra.npy')[iters[night]-1] - 1. # (orders, frames, pixels) wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames,", "+= -N/2. * np.log(X2 / N) # Find highest likelihood values maximum =", "= {'56550': 5, '56561': 4, '56904': 4, '56915': 6, '56966': 6, '57321': 6}", "spec, px): \"\"\" Subtracts the lower envelope from a model spectrum by finding", "together for i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma =", "server that can handle this or reduce the ranges and/or stepsizes for the", "type=str) parser.add_argument(\"-d\", '--datapath', default=\"./\", help=\"path to data\") parser.add_argument(\"-m\", '--modelpath', default=\"./\", help=\"path to models\")", "np.zeros((n_frames)) # Perform cross-correlation for orders redward of 600 nm, and sum together", "range(len(phase)): # Shift to planet's orbital velocity vp = Kp * np.sin(2.*np.pi*phase[frame]) vshift", "cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given VMR for i in range(len(contrast)):", "(vmrs[fidx]) print 'C = %.1f' % (contrast[cidx]) print 'off = %.1f' % (offset[oidx])", "print 'a = %.1f' % (alpha[aidx]) print 'Kp = %.1f' % (Kps[kidx]) print", "* variation[:,:,:,np.newaxis]**2 lnL_term3 = cmap_osum[np.newaxis,np.newaxis,:,:] * variation[:,:,:,np.newaxis] # Calculate lnL for given VMR", "3e5 shift_wave = wave[0,w_idx] * redshift mspec_shifted = model_interp(shift_wave) mspec_weighted = mspec_shifted -", "% (offset[oidx]) print 'a = %.1f' % (alpha[aidx]) print 'Kp = %.1f' 
%", "np from scipy.interpolate import interp1d from astropy.convolution import convolve, Gaussian1DKernel import argparse from", "sum together for i,o in enumerate(np.arange(24,37)): # Calculate time- and wavelength-dependent uncertainties tsigma", "wave = np.load(data_path+night+'_wavelength.npy') # (orders, frames, pixels) phase = np.load(data_path+night+'_phase.npy') # (frames) #", "(Kp, time, Vsys) _, term2_shift = phasefold(Kps, vgrid, vsys, merr, phase) _, term3_shift", "* cos^2 (pi * (phi - theta)) A_p = 1. - contrast[:,np.newaxis,np.newaxis] *", "np.load(data_path+night+'_phase.npy') # (frames) # Only include phases below 0.41 and above 0.59, to", "for frame in range(len(phase)): # Shift to planet's orbital velocity vp = Kp", "a * term3_shift # Sum the log likelihood in time X2[i] = np.nansum(X2_KTV,", "return sub def phasefold(Kps, vgrid, vsys, cmap, phase): \"\"\" Shifts the cross-correlation map", "maxes[1][0] oidx = maxes[2][0] aidx = maxes[3][0] kidx = maxes[4][0] vidx = maxes[5][0]", "spectra for each MJD night iters = {'56550': 5, '56561': 4, '56904': 4," ]
[ "'.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish')", "class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists", "# such as debian, etc, so the above line will work fine #", "publish') if __name__ == \"__main__\" : # Rename these variables to something meaningful", "this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ == \"__main__\" : # Rename these", "log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to", "crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview file...') # #TODO:", "if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview", "= False self.crate_valid = None self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self):", "= sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s -", "= True return def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid", "Write updated page to html file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\"))", "these variables to something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout,", "#Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create 
symlink", "Footer # footer_path = './footer.html' # with open(footer_path, 'r') as footer_file: # foot_soup", "False self.crate_valid = None self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self): #", "this_crate.preview_path) # else: # #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if", "index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between", "python3 # NOTE: If you are using an alpine docker image # such", "open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow to", "higher as your base docker image. # Steps in this action: # -", "If you are using an alpine docker image # such as pyaction-lite, the", "exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path))", "= None self.preview_valid = None def check_rocrate_valid(self): # Checks if there are rocrate", "False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid =", "metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1)", "format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging, loglevel)) # The", "stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging, loglevel)) #", "file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1) if", "os.symlink(src, dst) except Exception as err: log.warning('Problem while creating symlink:') log.warning(err) return def", 
"create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the .json >>", "= CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) #", ">> .jsonld file extensions depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld',", "Steps in this action: # - check if correct files exist. # -", "# work. The above line works fine on other linux distributions # such", "template to preview file...') # #TODO: Find a better way of getting the", "'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid = None self.preview_valid = None", "page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the", "self.crate_valid = None self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self): # Checks", "Header/footer functionality moved to jekyll # with open(crate_obj.preview_path, 'r') as preview_file: # soup", "variables to something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s", "create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path)", "# #Add Header # header_path = './header.html' # with open(header_path) as header_file: #", "to jekyll # with open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser')", "self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 
'ro-crate-preview.html')) self.preview_exists =", "err: log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js", "self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata", "self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists:", "The above line works fine on other linux distributions # such as debian,", "log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file", "self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir,", "symlink for index.html and crate.json # - check validation? 
import sys import os", "CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists =", "== \"__main__\" : # Rename these variables to something meaningful crate_path = sys.argv[1]", "else: # #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ##", "False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists", "if metadata is valid...') #TODO:some test self.metadata_valid = True return def check_preview(self): log.debug('Checking", "log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid = True return def create_symlink(dst,", "crate.json # - check validation? import sys import os from pathlib import Path", "# if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html", "__name__ == \"__main__\" : # Rename these variables to something meaningful crate_path =", "getting the header/footer templates. # NOTE: Header/footer functionality moved to jekyll # with", "pyaction-lite, the -S option above won't # work. The above line works fine", "log.info('Crate passes validity checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking if metadata", "on other linux distributions # such as debian, etc, so the above line", ".json >> .jsonld file extensions depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json':", "log.debug('Adding Header/Footer template to preview file...') # #TODO: Find a better way of", "a preview.html from a rocrate json file. 
rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview", "will work fine # if you use pyaction:4.0.0 or higher as your base", "subprocess import logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def", "sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging,", "this_crate.metadata_exists: ## Create symlink between the .json >> .jsonld file extensions depending on", "return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a", "if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True return def", "= True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path))", "return def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid = True", "soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path = './header.html' # with", "None def check_rocrate_valid(self): # Checks if there are rocrate objects in directory log.debug('Checking", "foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated page to html", "preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid", "{0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists", "crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False 
self.preview_path =", "if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate", "above line works fine on other linux distributions # such as debian, etc,", "= Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False", "files exist. # - create symlink for index.html and crate.json # - check", "self.metadata_exists = True else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists =", "def publish_rocrate(crate_dir): # steps to follow to create the correct files to publish", "docker image # such as pyaction-lite, the -S option above won't # work.", "such as pyaction-lite, the -S option above won't # work. 
The above line", "import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir", "'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid =", "os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True", "jekyll # with open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') #", "self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists =", "#Add Header # header_path = './header.html' # with open(header_path) as header_file: # head_soup", "except Exception as err: log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): '''", "while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create", "so the above line will work fine # if you use pyaction:4.0.0 or", "# - create symlink for index.html and crate.json # - check validation? import", "HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding", "the .json >> .jsonld file extensions depending on which exists if os.path.splitext(this_crate.metadata_path) ==", "= './header.html' # with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') #", "option above won't # work. The above line works fine on other linux", "- check validation? 
import sys import os from pathlib import Path import subprocess", "import Path import subprocess import logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint')", "crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists", "rocrate objects in directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata", "NOTE: Header/footer functionality moved to jekyll # with open(crate_obj.preview_path, 'r') as preview_file: #", "to preview file...') # #TODO: Find a better way of getting the header/footer", "os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file", "file. rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path", "# Steps in this action: # - check if correct files exist. 
#", "def check_rocrate_valid(self): # Checks if there are rocrate objects in directory log.debug('Checking that", "correct files to publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir)", "self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else:", "file extensions depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif", "an alpine docker image # such as pyaction-lite, the -S option above won't", "dst) except Exception as err: log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj):", ".jsonld file extensions depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path)", "self.metadata_valid = True return def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test", "create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate json", "elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists =", "uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate json file. rochtml rocrate_datacrate_test/ro-crate-metadata.json", "soup.html.body.append(foot_soup) # # Write updated page to html file # with open('./test_out.html','wb') as", "#TODO: Find a better way of getting the header/footer templates. 
# NOTE: Header/footer", "preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file DOES NOT", "# if you use pyaction:4.0.0 or higher as your base docker image. #", "bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir =", "in tmp directory log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src, dst) except", "== '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ == \"__main__\" :", "log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file DOES", "image # such as pyaction-lite, the -S option above won't # work. The", "= None self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self): # Checks if", "This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate json file. 
rochtml", "self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True return", "check_rocrate_valid(self): # Checks if there are rocrate objects in directory log.debug('Checking that rocrate", "directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists:", "which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json',", "#Add Footer # footer_path = './footer.html' # with open(footer_path, 'r') as footer_file: #", "something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s", "as debian, etc, so the above line will work fine # if you", "# create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html',", "# with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) #", "create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ == \"__main__\" : # Rename", "publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists:", "to create a preview.html from a rocrate json file. 
rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating", "os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path", "soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html' # with open(footer_path, 'r') as", "steps to follow to create the correct files to publish to GH-Pages log.info('Preparing", "and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking", "updated page to html file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return", "or higher as your base docker image. # Steps in this action: #", "# with open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup)", "os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready", "rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml", "with open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) #", "publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) #", "None self.preview_valid = None def check_rocrate_valid(self): # Checks if there are rocrate objects", "your base docker image. 
# Steps in this action: # - check if", "= crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview file...') #", "BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated page to html file #", "# else: # #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists:", "if correct files exist. # - create symlink for index.html and crate.json #", "= self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate", "moved to jekyll # with open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file,", "between the .json >> .jsonld file extensions depending on which exists if os.path.splitext(this_crate.metadata_path)", "False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid =", "rocrate json file. rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file", "other linux distributions # such as debian, etc, so the above line will", "line will work fine # if you use pyaction:4.0.0 or higher as your", "as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow to create", "if there are rocrate objects in directory log.debug('Checking that rocrate files exist...') if", "use pyaction:4.0.0 or higher as your base docker image. # Steps in this", "as pyaction-lite, the -S option above won't # work. 
The above line works", "create the correct files to publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate", "this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the .json >> .jsonld file extensions", "self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None", "crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s", "exist. # - create symlink for index.html and crate.json # - check validation?", "= True else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False", "#TODO:some test self.metadata_valid = True return def check_preview(self): log.debug('Checking if preview is valid...')", "is valid...') #TODO:some test self.metadata_valid = True return def check_preview(self): log.debug('Checking if preview", "file...') # #TODO: Find a better way of getting the header/footer templates. 
#", "if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')):", "= Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid = None self.preview_valid", "exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview", "== '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to", "metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file DOES", "self.preview_exists = True else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists =", "%(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging, loglevel)) # The work: publish_rocrate(crate_path)", "pyaction:4.0.0 or higher as your base docker image. 
# Steps in this action:", "exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes", "are rocrate objects in directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate", "# soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path = './header.html' #", "check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid = True return def", "\"__main__\" : # Rename these variables to something meaningful crate_path = sys.argv[1] loglevel", "log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path =", "# - check if correct files exist. # - create symlink for index.html", "# soup.html.body.append(foot_soup) # # Write updated page to html file # with open('./test_out.html','wb')", "# steps to follow to create the correct files to publish to GH-Pages", "are using an alpine docker image # such as pyaction-lite, the -S option", "src): # This creates a symbolic link on python in tmp directory log.debug(f'Creating", "import sys import os from pathlib import Path import subprocess import logging from", "= True return def create_symlink(dst, src): # This creates a symbolic link on", "to publish') if __name__ == \"__main__\" : # Rename these variables to something", "preview.html from a rocrate json file. 
rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file", "= None def check_rocrate_valid(self): # Checks if there are rocrate objects in directory", "''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True)", "is valid...') #TODO:some test self.preview_valid = True return def create_symlink(dst, src): # This", "symlink between the .json >> .jsonld file extensions depending on which exists if", "os from pathlib import Path import subprocess import logging from bs4 import BeautifulSoup", "# soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html' # with open(footer_path, 'r')", "head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html'", "create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if", "footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated page", "templates. 
# NOTE: Header/footer functionality moved to jekyll # with open(crate_obj.preview_path, 'r') as", "using an alpine docker image # such as pyaction-lite, the -S option above", "to publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate)", "def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False", "follow to create the correct files to publish to GH-Pages log.info('Preparing to publish", "# Rename these variables to something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2]", "link on python in tmp directory log.debug(f'Creating symlink between {src} and {dst}') try:", "metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld')", "# outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow to create the correct", "preview is valid...') #TODO:some test self.preview_valid = True return def create_symlink(dst, src): #", "#TODO:some test self.preview_valid = True return def create_symlink(dst, src): # This creates a", "return def publish_rocrate(crate_dir): # steps to follow to create the correct files to", "GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path)", "line works fine on other linux distributions # such as debian, etc, so", "True else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid", "way of getting the header/footer templates. 
# NOTE: Header/footer functionality moved to jekyll", "True return def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid =", "NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate", "test self.metadata_valid = True return def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some", "# log.debug('Adding Header/Footer template to preview file...') # #TODO: Find a better way", "{0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file", "Exception as err: log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This", "footer_path = './footer.html' # with open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file,", "files to publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid()", "= logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir,", "create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: #", "BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path", "linux distributions # such as debian, etc, so the above line will work", "with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow", "NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False 
self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate", "create a preview.html from a rocrate json file. rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML", "# head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path =", "BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html' # with", "a better way of getting the header/footer templates. # NOTE: Header/footer functionality moved", "from pathlib import Path import subprocess import logging from bs4 import BeautifulSoup log", "json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate", "this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page #", "Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid = None self.preview_valid =", "exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif", "file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file DOES NOT exist:", "log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid = True return def check_preview(self):", "{metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview file...') # #TODO: Find a", "create symlink for index.html and crate.json # - check validation? 
import sys import", "return def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid = True", "creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a", "with open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header", "open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # #", "# Write updated page to html file # with open('./test_out.html','wb') as outfile: #", "as err: log.warning('Problem while creating symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses", "between {src} and {dst}') try: os.symlink(src, dst) except Exception as err: log.warning('Problem while", "log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False", "# foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated page to", "= False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid", "DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1) if os.path.exists(self.preview_path):", "# create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the .json >> .jsonld", "'./footer.html' # with open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') #", "index.html and crate.json # - check validation? 
import sys import os from pathlib", "CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html',", "# #TODO: Find a better way of getting the header/footer templates. # NOTE:", "this action: # - check if correct files exist. # - create symlink", "'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path", "alpine docker image # such as pyaction-lite, the -S option above won't #", "if __name__ == \"__main__\" : # Rename these variables to something meaningful crate_path", "Path import subprocess import logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class", "subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview file...') # #TODO: Find", "def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid = True return", "def create_symlink(dst, src): # This creates a symbolic link on python in tmp", "correct files exist. 
# - create symlink for index.html and crate.json # -", "self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True return def check_metadata(self):", "def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid = True return", "# # Write updated page to html file # with open('./test_out.html','wb') as outfile:", "to something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s -", "try: os.symlink(src, dst) except Exception as err: log.warning('Problem while creating symlink:') log.warning(err) return", "# footer_path = './footer.html' # with open(footer_path, 'r') as footer_file: # foot_soup =", "this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate)", "the header/footer templates. 
# NOTE: Header/footer functionality moved to jekyll # with open(crate_obj.preview_path,", "{0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists", "self.preview_valid = None def check_rocrate_valid(self): # Checks if there are rocrate objects in", "for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to", "'./header.html' # with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup)", "header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path", "ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: #", "if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page", "log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if", "# NOTE: If you are using an alpine docker image # such as", "metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview file...')", "False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else:", "log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src, dst) except Exception as err:", "better way of getting the header/footer templates. 
# NOTE: Header/footer functionality moved to", "page to html file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def", "file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid and", "in directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file", "True else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata()", "html file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): #", "else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid =", "meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s -", "works fine on other linux distributions # such as debian, etc, so the", "functionality moved to jekyll # with open(crate_obj.preview_path, 'r') as preview_file: # soup =", "logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json'))", "file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template", "as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer #", "ready to publish') if __name__ == \"__main__\" : # Rename these variables to", "files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True", 
"create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) #", "log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path =", "self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.')", "def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate", "tmp directory log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src, dst) except Exception", "True return def create_symlink(dst, src): # This creates a symbolic link on python", "work fine # if you use pyaction:4.0.0 or higher as your base docker", "= sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel))", "shell=True) # log.debug('Adding Header/Footer template to preview file...') # #TODO: Find a better", "directory log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src, dst) except Exception as", "sys import os from pathlib import Path import subprocess import logging from bs4", "self.preview_exists = False self.crate_valid = None self.metadata_valid = None self.preview_valid = None def", "'html.parser') # #Add Header # header_path = './header.html' # with open(header_path) as header_file:", "{0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer template to preview", "as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path =", "to publish ROCrate.') this_crate = 
CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if", "{dst}') try: os.symlink(src, dst) except Exception as err: log.warning('Problem while creating symlink:') log.warning(err)", "file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file DOES NOT exist:", "check if correct files exist. # - create symlink for index.html and crate.json", "Checks if there are rocrate objects in directory log.debug('Checking that rocrate files exist...')", "exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True else: log.warning('ROCrate", "from a rocrate json file. rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for", "of getting the header/footer templates. # NOTE: Header/footer functionality moved to jekyll #", "preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path = './header.html'", "self.preview_valid = True return def create_symlink(dst, src): # This creates a symbolic link", "image. # Steps in this action: # - check if correct files exist.", "in this action: # - check if correct files exist. # - create", "you are using an alpine docker image # such as pyaction-lite, the -S", "creates a symbolic link on python in tmp directory log.debug(f'Creating symlink between {src}", "- create symlink for index.html and crate.json # - check validation? import sys", "above won't # work. The above line works fine on other linux distributions", "self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True", "valid...') #TODO:some test self.preview_valid = True return def create_symlink(dst, src): # This creates", "won't # work. 
The above line works fine on other linux distributions #", "this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path)", "on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld':", "Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid", "fine on other linux distributions # such as debian, etc, so the above", "with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add", "and crate.json # - check validation? import sys import os from pathlib import", "action: # - check if correct files exist. # - create symlink for", "as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated", "__init__(self, crate_dir): self.crate_dir = crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path", "header/footer templates. 
# NOTE: Header/footer functionality moved to jekyll # with open(crate_obj.preview_path, 'r')", "os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ == \"__main__\"", "= BeautifulSoup(preview_file, 'html.parser') # #Add Header # header_path = './header.html' # with open(header_path)", "elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ ==", "as your base docker image. # Steps in this action: # - check", "log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) #", "{0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity", "log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from", "# NOTE: Header/footer functionality moved to jekyll # with open(crate_obj.preview_path, 'r') as preview_file:", "docker image. 
# Steps in this action: # - check if correct files", "outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow to create the correct files", "create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the .json >> .jsonld file", "NOTE: If you are using an alpine docker image # such as pyaction-lite,", "# This creates a symbolic link on python in tmp directory log.debug(f'Creating symlink", "- %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging, loglevel)) # The work:", "# create_symlink('index.html', this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else:", "self.metadata_exists = False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists:", "else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview()", "json file. 
rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file =", "jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file DOES NOT", "validity checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking if metadata is valid...')", "to html file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir):", "= crate_dir self.metadata_path = Path(os.path.join(self.crate_dir, 'ro-crate-metadata.json')) self.metadata_exists = False self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html'))", "self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking if", "and {dst}') try: os.symlink(src, dst) except Exception as err: log.warning('Problem while creating symlink:')", "{0}'.format(self.metadata_path)) self.metadata_exists = False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file", "base docker image. # Steps in this action: # - check if correct", "self.crate_valid = True return def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test", "Create symlink between the .json >> .jsonld file extensions depending on which exists", "import logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self,", "- check if correct files exist. # - create symlink for index.html and", "exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld", "-S option above won't # work. 
The above line works fine on other", "test self.preview_valid = True return def create_symlink(dst, src): # This creates a symbolic", "etc, so the above line will work fine # if you use pyaction:4.0.0", "metadata is valid...') #TODO:some test self.metadata_valid = True return def check_preview(self): log.debug('Checking if", "debian, etc, so the above line will work fine # if you use", "None self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self): # Checks if there", "'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html' # with open(footer_path,", "# such as pyaction-lite, the -S option above won't # work. The above", "if you use pyaction:4.0.0 or higher as your base docker image. # Steps", "# - check validation? import sys import os from pathlib import Path import", "file exists: {0}'.format(self.metadata_path)) self.metadata_exists = True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata", "{src} and {dst}') try: os.symlink(src, dst) except Exception as err: log.warning('Problem while creating", "if preview is valid...') #TODO:some test self.preview_valid = True return def create_symlink(dst, src):", "a symbolic link on python in tmp directory log.debug(f'Creating symlink between {src} and", "'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write", "# #Add Footer # footer_path = './footer.html' # with open(footer_path, 'r') as footer_file:", "check validation? 
import sys import os from pathlib import Path import subprocess import", "import os from pathlib import Path import subprocess import logging from bs4 import", "log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) #", "the above line will work fine # if you use pyaction:4.0.0 or higher", "such as debian, etc, so the above line will work fine # if", "objects in directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json", "exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path)", "Rename these variables to something meaningful crate_path = sys.argv[1] loglevel = sys.argv[2] logging.basicConfig(", "distributions # such as debian, etc, so the above line will work fine", "create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page # create_preview_html(this_crate) #", "depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) ==", "self.preview_path = Path(os.path.join(self.crate_dir, 'ro-crate-preview.html')) self.preview_exists = False self.crate_valid = None self.metadata_valid = None", "extensions depending on which exists if os.path.splitext(this_crate.metadata_path) == '.json': create_symlink('ro-crate-metadata.jsonld', this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path)", "passes validity checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking if metadata is", "BeautifulSoup(preview_file, 'html.parser') # 
#Add Header # header_path = './header.html' # with open(header_path) as", "to follow to create the correct files to publish to GH-Pages log.info('Preparing to", "= BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer # footer_path = './footer.html' #", "# create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) # else: # #Create index.html page # create_preview_html(this_crate)", "if this_crate.metadata_exists: ## Create symlink between the .json >> .jsonld file extensions depending", "DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid:", "preview file...') # #TODO: Find a better way of getting the header/footer templates.", "file # with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps", "= False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path)) self.preview_exists = True", "valid...') #TODO:some test self.metadata_valid = True return def check_preview(self): log.debug('Checking if preview is", "open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser') # soup.html.body.insert_before(head_soup) # #Add Footer", "= True return def check_preview(self): log.debug('Checking if preview is valid...') #TODO:some test self.preview_valid", "check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid = True return def", "create_symlink(dst, src): # This creates a symbolic link on python in tmp directory", "Find a better way of getting the header/footer templates. # NOTE: Header/footer functionality", "work. 
The above line works fine on other linux distributions # such as", "= './footer.html' # with open(footer_path, 'r') as footer_file: # foot_soup = BeautifulSoup(footer_file, 'html.parser')", "to create the correct files to publish to GH-Pages log.info('Preparing to publish ROCrate.')", "# with open('./test_out.html','wb') as outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to", "This creates a symbolic link on python in tmp directory log.debug(f'Creating symlink between", "symlink:') log.warning(err) return def create_preview_html(crate_obj): ''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html", "header_path = './header.html' # with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file, 'html.parser')", "log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path))", "sys.argv[1] loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',", "# Checks if there are rocrate objects in directory log.debug('Checking that rocrate files", ": # Rename these variables to something meaningful crate_path = sys.argv[1] loglevel =", "Header # header_path = './header.html' # with open(header_path) as header_file: # head_soup =", "# header_path = './header.html' # with open(header_path) as header_file: # head_soup = BeautifulSoup(header_file,", "above line will work fine # if you use pyaction:4.0.0 or higher as", "that rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists", "True return def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some test self.metadata_valid =", "this_crate.preview_path) # if this_crate.preview_exists: # create_preview_html(this_crate) # 
create_symlink('index.html', this_crate.preview_path) # else: # #Create", "fine # if you use pyaction:4.0.0 or higher as your base docker image.", "python in tmp directory log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src, dst)", "rocrate files exist...') if os.path.exists(self.metadata_path): log.debug('ROCrate metadata json file exists: {0}'.format(self.metadata_path)) self.metadata_exists =", "= True else: log.warning('ROCrate preview file DOES NOT exist: {0}'.format(self.preview_path)) self.preview_exists = False", "from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir): self.crate_dir", "open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add Header #", "= False self.crate_valid = False exit(1) if os.path.exists(self.preview_path): log.debug('ROCrate preview file exists: {0}'.format(self.preview_path))", "# create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create symlink between the .json", "'.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__ == \"__main__\" : #", "there are rocrate objects in directory log.debug('Checking that rocrate files exist...') if os.path.exists(self.metadata_path):", "for index.html and crate.json # - check validation? import sys import os from", "'html.parser') # soup.html.body.append(foot_soup) # # Write updated page to html file # with", "this_crate.metadata_path) elif os.path.splitext(this_crate.metadata_path) == '.jsonld': create_symlink('ro-crate-metadata.json', this_crate.metadata_path) log.info('ROCrate ready to publish') if __name__", "exists: {0}'.format(self.metadata_path)) self.metadata_exists = True else: log.error('ROCrate metadata file DOES NOT exist: {0}'.format(self.metadata_path))", "a rocrate json file. 
rochtml rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path))", "checks.') self.crate_valid = True return def check_metadata(self): log.debug('Checking if metadata is valid...') #TODO:some", "logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging, loglevel)) log.setLevel(getattr(logging, loglevel))", "outfile: # outfile.write(soup.prettify(\"utf-8\")) return def publish_rocrate(crate_dir): # steps to follow to create the", "to GH-Pages log.info('Preparing to publish ROCrate.') this_crate = CrateObj(crate_dir) this_crate.check_rocrate_valid() create_preview_html(this_crate) # create_symlink('index.html',", "''' This uses https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate json file.", "the correct files to publish to GH-Pages log.info('Preparing to publish ROCrate.') this_crate =", "symbolic link on python in tmp directory log.debug(f'Creating symlink between {src} and {dst}')", "## Create symlink between the .json >> .jsonld file extensions depending on which", "logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj(): def __init__(self, crate_dir):", "loglevel = sys.argv[2] logging.basicConfig( stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=getattr(logging,", "validation? import sys import os from pathlib import Path import subprocess import logging", "https://github.com/UTS-eResearch/ro-crate-html-js to create a preview.html from a rocrate json file. rochtml rocrate_datacrate_test/ro-crate-metadata.json '''", "you use pyaction:4.0.0 or higher as your base docker image. 
# Steps in", "#!/usr/bin/env python3 # NOTE: If you are using an alpine docker image #", "return def create_symlink(dst, src): # This creates a symbolic link on python in", "rocrate_datacrate_test/ro-crate-metadata.json ''' log.info('Creating HTML preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}',", "on python in tmp directory log.debug(f'Creating symlink between {src} and {dst}') try: os.symlink(src,", "log.info('ROCrate ready to publish') if __name__ == \"__main__\" : # Rename these variables", "Header/Footer template to preview file...') # #TODO: Find a better way of getting", "preview file for {0}...'.format(crate_obj.metadata_path)) metadata_file = crate_obj.metadata_path subprocess.check_call(f'rochtml {metadata_file}', shell=True) # log.debug('Adding Header/Footer", "the -S option above won't # work. The above line works fine on", "pathlib import Path import subprocess import logging from bs4 import BeautifulSoup log =", "# with open(crate_obj.preview_path, 'r') as preview_file: # soup = BeautifulSoup(preview_file, 'html.parser') # #Add", "symlink between {src} and {dst}') try: os.symlink(src, dst) except Exception as err: log.warning('Problem", "= False self.check_metadata() self.check_preview() if self.metadata_valid and self.preview_valid: log.info('Crate passes validity checks.') self.crate_valid", "True elif os.path.exists(self.metadata_path.with_suffix('.jsonld')): self.metadata_path = self.metadata_path.with_suffix('.jsonld') log.debug('ROCrate metadata jsonld file exists: {0}'.format(self.metadata_path)) self.metadata_exists", "import subprocess import logging from bs4 import BeautifulSoup log = logging.getLogger('entrypoint') class CrateObj():", "# #Create index.html page # create_preview_html(this_crate) # create_symlink('index.html', this_crate.preview_path) if this_crate.metadata_exists: ## Create", "publish_rocrate(crate_dir): # steps to follow to 
create the correct files to publish to", "self.metadata_valid = None self.preview_valid = None def check_rocrate_valid(self): # Checks if there are", "= BeautifulSoup(footer_file, 'html.parser') # soup.html.body.append(foot_soup) # # Write updated page to html file" ]
[ "class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year'])", "[\"JPN\", 6]] data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns),", "StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) data_set_2_with_multiple_reports = StatisticReporterDataClass(", "impressions\", \"Test - Total number of data\", \"Test - Total number of missing", "DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator", "Test\", \"1900-2020\", 4, 1978, 41, 1687.22, 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def", "missing data\"], [\"Year, Country Code, Test\", 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def", "- Total percent of missing data\"], [\"Year, Country Code, Test\", 6, 1, 16.67]]", "Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def 
test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report", "of missing data\"], [\"Country Code, Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set(", "columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country", "1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900,", "4, 1978, 41, 1687.22, 6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code - Unique", "Container import pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter", "4, 1978, 41, 1687.22, 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self):", "2], [1990, \"RUS\", ], [2010, \"SRB\", 4], [2020, \"ESP\", 5], [1996, \"JPN\", 6]]", ") data_set_2_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value =", "= MissingDataStatisticGenerator([\"Test\"]) def 
test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays( self): empty_statistic_object = StatisticReporterDataClass(\"Test\", pd.DataFrame([]), []) expected_return_value = [[\"Test\"],", "pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Test - Total number", "1], [\"JPN\", 2], [\"RUS\", ], [\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns =", "setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country", "test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator,", "self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code -", "Test\", \"1900-2020\", 4, 1978, 41, 1687.22, 6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code", "[\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass( \"Test\",", "[1900, \"SRB\", 1], [1950, \"JPN\", 2], [1990, \"RUS\", ], [2010, \"SRB\", 4], [2020,", "\"Test - Total percent of missing data\"], [\"Year, Country Code, Test\", 6, 1,", "= ['Country Code', 'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass( \"Test\", 
pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator,", "16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values =", "StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"],", "\"Test - Total percent of missing data\"], [\"Year, Country Code, Test\", \"1900-2020\", 4,", "expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code - Unique impressions\", \"Year", "[data_set_with_variance_report])) def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values,", "self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) data_set_2_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator,", "Unique impressions\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year - Variance\",", "self.variance_statistic_generator, 
self.missing_data_statistic_generator] ) data_set_2_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] )", "[\"Columns\", \"Year - Range\"], [\"Year, Country Code, Test\", \"1900-2020\"]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report])) def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report(", "data_set_with_range_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\",", "def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_variance_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns),", "data_set_statistic_reporter.classes.statistic_generator.implementations.missing_data_statistic_generator import \\ MissingDataStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\", "self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values = [ [\"SRB\", 1], [\"JPN\", 2], [\"RUS\",", "Code - Unique impressions\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year", "Code, Test\", 4, 6, 1, 16.67] ] 
self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self):", "[2010, \"SRB\", 4], [2020, \"ESP\", 5], [1996, \"JPN\", 6]] data_frame_columns = ['Year', 'Country", "data_set_statistic_reporter.depedency_injector.container import Container import pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def", "1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values", "deviation\", \"Year - Variance\"], [\"Year, Country Code, Test\", 1978, 41, 1687.22]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set(", "Standard deviation\", \"Year - Variance\", \"Test - Total number of data\", \"Test -", "\"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Test - Total", "Mean value\", \"Year - Standard deviation\", \"Year - Variance\", \"Test - Total number", "data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\",", "MissingDataStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ 
RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator", "def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns),", "import \\ VarianceStatisticGenerator from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import Container import pandas", "deviation\", \"Year - Variance\", \"Test - Total number of data\", \"Test - Total", "data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value", "Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator =", "pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code - Unique", "pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def 
setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set()", "1978, 41, 1687.22, 6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code - Unique impressions\",", "from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\ VarianceStatisticGenerator from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import Container", "Code - Unique impressions\"], [\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def", "1687.22, 6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code - Unique impressions\", \"Test -", "= self.get_test_data_frame_values_and_columns() data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value =", "self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1], [1950,", "- Mean value\", \"Year - Standard deviation\", \"Year - Variance\", \"Test - Total", "columns=data_frame_columns), [self.columns_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Test - Total number of", "41, 1687.22]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_variance_report])) def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, 
data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports", "- Variance\", \"Test - Total number of data\", \"Test - Total number of", "= self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator]", "[\"Columns\", \"Country Code - Unique impressions\", \"Test - Total number of data\", \"Test", "self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = StatisticReporterDataClass(", "DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator", "[[\"Test\"], [\"Columns\", \"Year - Range\"], [\"Year, Country Code, Test\", \"1900-2020\"]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report])) def", "expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\"], [\"Year, Country Code, Test\",", "def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1], [1950, \"JPN\", 2], [1990, \"RUS\",", "[[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code - Unique impressions\", \"Year - 
Mean", "[[\"Test\"], [\"Columns\", \"Test - Total number of data\", \"Test - Total number of", "self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_variance_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator]", "pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator =", "self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator,", "expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year", "Variance\", \"Test - Total number of data\", \"Test - Total number of missing", "- Unique impressions\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year -", "pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator =", "self.range_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\"], [\"Year, Country Code, Test\",", "= self.get_test_data_frame_values_and_columns() data_set_with_variance_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value =", "Total percent of missing data\"], [\"Country 
Code, Test\", 4, 6, 1, 16.67] ]", "\"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year -", "self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_variance_report])) def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\",", "[\"SRB\", 1], [\"JPN\", 2], [\"RUS\", ], [\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns", "self.data_set_statistic_reporter = Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code'])", "= [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code - Unique impressions\", \"Year -", "\"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code -", "= self.get_test_data_frame_values_and_columns() data_frame_2_values = [ [\"SRB\", 1], [\"JPN\", 2], [\"RUS\", ], [\"SRB\", 4],", "from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import Container import pandas as pd class", "data\"], [\"Country Code, Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, 
data_set_2_with_multiple_reports]))", "= StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Country", "6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()", "StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year -", "data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator,", "UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\ VarianceStatisticGenerator from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import", "Unique impressions\"], [\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self):", "Country Code, Test\", \"1900-2020\"]] self.assertEqual(expected_return_value, 
self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report])) def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()", "Country Code, Test\", 1978, 41, 1687.22]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_variance_report])) def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns,", "\"RUS\", ], [2010, \"SRB\", 4], [2020, \"ESP\", 5], [1996, \"JPN\", 6]] data_frame_columns =", "= ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator", "\"Country Code - Unique impressions\", \"Year - Mean value\", \"Year - Standard deviation\",", "= self.get_test_data_frame_values_and_columns() data_set_with_range_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator] ) expected_return_value =", ") expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\"], [\"Year, Country Code, Test\", \"1900-2020\"]]", "import \\ MissingDataStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator", "6, 1, 16.67]] self.assertEqual(expected_return_value, 
self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns()", "= StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Test", "4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report =", "\"Country Code - Unique impressions\", \"Test - Total number of data\", \"Test -", "= [[\"Test\"], [], []] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()", "16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_variance_report =", "self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() 
data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator]", "StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year", "= StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year", "\"Test - Total percent of missing data\"], [\"Country Code, Test\", 4, 6, 1,", "\"JPN\", 2], [1990, \"RUS\", ], [2010, \"SRB\", 4], [2020, \"ESP\", 5], [1996, \"JPN\",", "impressions\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year - Variance\", \"Test", "StatisticReporterDataClass(\"Test\", pd.DataFrame([]), []) expected_return_value = [[\"Test\"], [], []] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self):", "], [\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports", "self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1], [1950, \"JPN\",", "[data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1], [1950, \"JPN\", 2],", "import Container import pandas as pd class 
DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self):", "data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import Container import pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase):", "self.get_test_data_frame_values_and_columns() data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"],", "data\"], [\"Year, Country Code, Test\", 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report(", "\"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\",", "Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns()", "missing data\"], [\"Country Code, Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports,", "[[\"Test\"], [], 
[]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_range_report", "6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code - Unique impressions\", \"Test - Total", "pd.DataFrame([]), []) expected_return_value = [[\"Test\"], [], []] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns,", "] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1],", "Total number of missing data\", \"Test - Total percent of missing data\"], [\"Year,", "data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values = [ [\"SRB\", 1], [\"JPN\", 2], [\"RUS\", ], [\"SRB\",", "Code']) self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator = MissingDataStatisticGenerator([\"Test\"]) def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays( self): empty_statistic_object = StatisticReporterDataClass(\"Test\",", "def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = 
StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns),", "test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator,", "impressions\"], [\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns,", "[self.columns_statistic_generator, self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code", "data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_range_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator] )", "- Total percent of missing data\"], [\"Year, Country Code, Test\", \"1900-2020\", 4, 1978,", "data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\ VarianceStatisticGenerator from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass", "from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ RangeStatisticGenerator 
from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import", "pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year", "Total number of data\", \"Test - Total number of missing data\", \"Test -", "data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\", "16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\",", "2], [\"RUS\", ], [\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns = ['Country Code',", "import pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase): def setUp(self): self.data_set_statistic_reporter =", "self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns, data_frame_values = 
self.get_test_data_frame_values_and_columns() data_set_with_range_report = StatisticReporterDataClass( \"Test\",", "percent of missing data\"], [\"Country Code, Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value,", "self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\"], [\"Year, Country", "4], [2020, \"ESP\", 5], [1996, \"JPN\", 6]] data_frame_columns = ['Year', 'Country Code', 'Test']", "1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_variance_report", "'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] )", "\\ RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\ VarianceStatisticGenerator from", "RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator = MissingDataStatisticGenerator([\"Test\"]) def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays(", 
"data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator,", "number of data\", \"Test - Total number of missing data\", \"Test - Total", "= StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value =", "of data\", \"Test - Total number of missing data\", \"Test - Total percent", "import unittest from data_set_statistic_reporter.classes.statistic_generator.implementations.column_names_statistic_generator import \\ ColumnNamesStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.missing_data_statistic_generator import \\ MissingDataStatisticGenerator from", "[\"Columns\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year - Variance\"], [\"Year,", "data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator,", "- Total number of data\", \"Test - Total number of missing data\", \"Test", "[\"Country Code, Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def", "self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator = 
MissingDataStatisticGenerator([\"Test\"]) def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays( self): empty_statistic_object = StatisticReporterDataClass(\"Test\", pd.DataFrame([]),", "\\ UniqueImpressionStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \\ VarianceStatisticGenerator from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container", "- Range\", \"Country Code - Unique impressions\", \"Year - Mean value\", \"Year -", "Total percent of missing data\"], [\"Year, Country Code, Test\", 6, 1, 16.67]] self.assertEqual(expected_return_value,", "empty_statistic_object = StatisticReporterDataClass(\"Test\", pd.DataFrame([]), []) expected_return_value = [[\"Test\"], [], []] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def", "self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values = [ [\"SRB\",", "number of missing data\", \"Test - Total percent of missing data\"], [\"Year, Country", "[2020, \"ESP\", 5], [1996, \"JPN\", 6]] data_frame_columns = ['Year', 'Country Code', 'Test'] return", "def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_range_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, 
columns=data_frame_columns),", "get_test_data_frame_values_and_columns(self): data_frame_values = [ [1900, \"SRB\", 1], [1950, \"JPN\", 2], [1990, \"RUS\", ],", "[\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]] data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports =", ") expected_return_value = [[\"Test\"], [\"Columns\", \"Test - Total number of data\", \"Test -", "\"Year - Variance\", \"Test - Total number of data\", \"Test - Total number", "[ [\"SRB\", 1], [\"JPN\", 2], [\"RUS\", ], [\"SRB\", 4], [\"ESP\", 5], [\"JPN\", 6]]", "import StatisticReporterDataClass from data_set_statistic_reporter.depedency_injector.container import Container import pandas as pd class DataSetStatisticReporterTestBase(unittest.TestCase): pass", "[[\"Test\"], [\"Columns\", \"Year - Mean value\", \"Year - Standard deviation\", \"Year - Variance\"],", "Test\", 4, 6, 1, 16.67] ] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_1_with_multiple_reports, data_set_2_with_multiple_reports])) def get_test_data_frame_values_and_columns(self): data_frame_values", "[1990, \"RUS\", ], [2010, \"SRB\", 4], [2020, \"ESP\", 5], [1996, \"JPN\", 6]] data_frame_columns", "[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\", \"Test - Total number of data\",", "[data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values = self.get_test_data_frame_values_and_columns() data_frame_2_values = [ [\"SRB\", 1],", "\"Year - Standard deviation\", \"Year - Variance\"], [\"Year, Country Code, Test\", 1978, 41,", "self.unique_impression_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country Code -", "of missing data\"], [\"Year, Country Code, Test\", 6, 1, 16.67]] 
self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report]))", "number of missing data\", \"Test - Total percent of missing data\"], [\"Country Code,", "[data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values,", "Test\", \"1900-2020\"]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report])) def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_unique_impression_report =", "self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_variance_report = StatisticReporterDataClass(", "[]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object])) def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_range_report = StatisticReporterDataClass(", "[\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value, 
self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values", "self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Range\", \"Country", "self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.missing_data_statistic_generator]", "self): empty_statistic_object = StatisticReporterDataClass(\"Test\", pd.DataFrame([]), []) expected_return_value = [[\"Test\"], [], []] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set([empty_statistic_object]))", "data_set_with_variance_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\",", "self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator = MissingDataStatisticGenerator([\"Test\"])", "self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values = 
self.get_test_data_frame_values_and_columns() data_set_with_missing_data_report = StatisticReporterDataClass( \"Test\",", "columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Mean value\", \"Year", "columns=data_frame_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year -", "Country Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report( self): data_frame_columns, data_frame_values =", "columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) data_set_2_with_multiple_reports = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_2_values, columns=data_frame_2_columns),", "self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_variance_report])) def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_multiple_reports = StatisticReporterDataClass(", "columns=data_frame_columns), [self.columns_statistic_generator, self.unique_impression_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\"],", "[]) expected_return_value = [[\"Test\"], [], []] 
import unittest

import pandas as pd

from data_set_statistic_reporter.classes.data_class.statistic_reporter_data_class import StatisticReporterDataClass
from data_set_statistic_reporter.classes.statistic_generator.implementations.column_names_statistic_generator import \
    ColumnNamesStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.missing_data_statistic_generator import \
    MissingDataStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \
    RangeStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import \
    UniqueImpressionStatisticGenerator
from data_set_statistic_reporter.classes.statistic_generator.implementations.variance_statistic_generator import \
    VarianceStatisticGenerator
from data_set_statistic_reporter.depedency_injector.container import Container


class DataSetStatisticReporterTestBase(unittest.TestCase):
    """Shared base class for statistic-reporter tests; defines no fixtures itself."""
    pass


class DataSetStatisticGeneratorTestDummyCases(DataSetStatisticReporterTestBase):
    """Drives ``get_statistics_as_data_set`` with small dummy frames.

    Each test builds one (or two) ``StatisticReporterDataClass`` objects from the
    shared fixture frame and checks the flattened report rows:
    [data-set name], [header row], [value row], repeated per data set.
    """

    def setUp(self):
        # Reporter under test is resolved through the DI container; the
        # generators are configured for the columns produced by
        # get_test_data_frame_values_and_columns().
        self.data_set_statistic_reporter = Container.statistic_reporter_data_set()
        self.columns_statistic_generator = ColumnNamesStatisticGenerator([])
        self.range_statistic_generator = RangeStatisticGenerator(['Year'])
        self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code'])
        self.variance_statistic_generator = VarianceStatisticGenerator(["Year"])
        self.missing_data_statistic_generator = MissingDataStatisticGenerator(["Test"])

    def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays(
            self):
        """An empty frame with no generators yields the name and two empty rows."""
        statistic_object = StatisticReporterDataClass("Test", pd.DataFrame([]), [])

        expected = [["Test"], [], []]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set([statistic_object]))

    def test_given_statistic_object_with_range_generator_when_get_statistics_as_data_set_then_return_data_set_name_with_range_statistics(
            self):
        """Range generator reports the min-max span of the 'Year' column."""
        columns, rows = self.get_test_data_frame_values_and_columns()
        statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(rows, columns=columns),
            [self.columns_statistic_generator, self.range_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Year - Range"],
                    ["Year, Country Code, Test", "1900-2020"]]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set([statistic_object]))

    def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report(
            self):
        """Unique-impression generator counts distinct 'Country Code' values."""
        columns, rows = self.get_test_data_frame_values_and_columns()
        statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(rows, columns=columns),
            [self.columns_statistic_generator, self.unique_impression_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Country Code - Unique impressions"],
                    ["Year, Country Code, Test", 4]]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set([statistic_object]))

    def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report(
            self):
        """Missing-data generator reports totals and percentage for 'Test'."""
        columns, rows = self.get_test_data_frame_values_and_columns()
        statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(rows, columns=columns),
            [self.columns_statistic_generator, self.missing_data_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Test - Total number of data",
                     "Test - Total number of missing data",
                     "Test - Total percent of missing data"],
                    ["Year, Country Code, Test", 6, 1, 16.67]]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set(
                             [statistic_object]))

    def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report(
            self):
        """Variance generator reports mean, standard deviation and variance of 'Year'."""
        columns, rows = self.get_test_data_frame_values_and_columns()
        statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(rows, columns=columns),
            [self.columns_statistic_generator, self.variance_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Year - Mean value", "Year - Standard deviation",
                     "Year - Variance"],
                    ["Year, Country Code, Test", 1978, 41, 1687.22]]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set(
                             [statistic_object]))

    def test_given_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics(
            self):
        """All five generators combine into one header row and one value row."""
        columns, rows = self.get_test_data_frame_values_and_columns()
        statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(rows, columns=columns),
            [self.columns_statistic_generator, self.range_statistic_generator,
             self.unique_impression_statistic_generator, self.variance_statistic_generator,
             self.missing_data_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Year - Range", "Country Code - Unique impressions",
                     "Year - Mean value", "Year - Standard deviation", "Year - Variance",
                     "Test - Total number of data", "Test - Total number of missing data",
                     "Test - Total percent of missing data"],
                    ["Year, Country Code, Test", "1900-2020", 4, 1978, 41, 1687.22,
                     6, 1, 16.67]]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set(
                             [statistic_object]))

    def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics(
            self):
        """Two data sets produce two consecutive name/header/value triples."""
        first_columns, first_rows = self.get_test_data_frame_values_and_columns()
        second_rows = [
            ["SRB", 1],
            ["JPN", 2],
            ["RUS", ],
            ["SRB", 4],
            ["ESP", 5],
            ["JPN", 6]]
        second_columns = ['Country Code', 'Test']
        first_statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(first_rows, columns=first_columns),
            [self.columns_statistic_generator, self.range_statistic_generator,
             self.unique_impression_statistic_generator, self.variance_statistic_generator,
             self.missing_data_statistic_generator])
        second_statistic_object = StatisticReporterDataClass(
            "Test",
            pd.DataFrame(second_rows, columns=second_columns),
            [self.columns_statistic_generator, self.unique_impression_statistic_generator,
             self.missing_data_statistic_generator])

        expected = [["Test"],
                    ["Columns", "Year - Range", "Country Code - Unique impressions",
                     "Year - Mean value", "Year - Standard deviation", "Year - Variance",
                     "Test - Total number of data", "Test - Total number of missing data",
                     "Test - Total percent of missing data"],
                    ["Year, Country Code, Test", "1900-2020", 4, 1978, 41, 1687.22,
                     6, 1, 16.67],
                    ["Test"],
                    ["Columns", "Country Code - Unique impressions",
                     "Test - Total number of data", "Test - Total number of missing data",
                     "Test - Total percent of missing data"],
                    ["Country Code, Test", 4, 6, 1, 16.67]
                    ]

        self.assertEqual(expected,
                         self.data_set_statistic_reporter.get_statistics_as_data_set(
                             [first_statistic_object, second_statistic_object]))

    def get_test_data_frame_values_and_columns(self):
        """Return ``(columns, rows)`` for the shared fixture frame.

        The 'RUS' row deliberately omits its 'Test' value so the missing-data
        generator has exactly one NaN to count.
        """
        rows = [
            [1900, "SRB", 1],
            [1950, "JPN", 2],
            [1990, "RUS", ],
            [2010, "SRB", 4],
            [2020, "ESP", 5],
            [1996, "JPN", 6]]
        columns = ['Year', 'Country Code', 'Test']
        return columns, rows
data_frame_values =", "from data_set_statistic_reporter.classes.statistic_generator.implementations.missing_data_statistic_generator import \\ MissingDataStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.range_statistic_generator import \\ RangeStatisticGenerator from data_set_statistic_reporter.classes.statistic_generator.implementations.unique_impression_statistic_generator import", "\"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year - Mean", "StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values, columns=data_frame_columns), [self.columns_statistic_generator, self.variance_statistic_generator] ) expected_return_value = [[\"Test\"], [\"Columns\", \"Year -", "\"Year - Range\", \"Country Code - Unique impressions\", \"Year - Mean value\", \"Year", "\"Test - Total number of missing data\", \"Test - Total percent of missing", "- Unique impressions\"], [\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_unique_impression_report])) def test_given_statistic_object_with_missing_data_report_array_when_reports_statistic_then_return_data_set_name_with_missing_data_report(", "\"Year - Variance\"], [\"Year, Country Code, Test\", 1978, 41, 1687.22]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_variance_report]))", "data\"], [\"Year, Country Code, Test\", \"1900-2020\", 4, 1978, 41, 1687.22, 6, 1, 16.67]]", "1], [1950, \"JPN\", 2], [1990, \"RUS\", ], [2010, \"SRB\", 4], [2020, \"ESP\", 5],", "[[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\"], [\"Year, Country Code, Test\", 4]] self.assertEqual(expected_return_value,", "= StatisticReporterDataClass( \"Test\", 
pd.DataFrame(data_frame_1_values, columns=data_frame_1_columns), [self.columns_statistic_generator, self.range_statistic_generator, self.unique_impression_statistic_generator, self.variance_statistic_generator, self.missing_data_statistic_generator] ) data_set_2_with_multiple_reports =", "missing data\", \"Test - Total percent of missing data\"], [\"Country Code, Test\", 4,", "self.data_set_statistic_reporter.get_statistics_as_data_set([data_set_with_range_report])) def test_given_statistic_object_with_unique_impression_report_array_when_reports_statistic_then_return_data_set_name_with_unique_impression_report( self): data_frame_columns, data_frame_values = self.get_test_data_frame_values_and_columns() data_set_with_unique_impression_report = StatisticReporterDataClass( \"Test\", pd.DataFrame(data_frame_values,", "self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator = VarianceStatisticGenerator([\"Year\"]) self.missing_data_statistic_generator = MissingDataStatisticGenerator([\"Test\"]) def test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays( self):", "= Container.statistic_reporter_data_set() self.columns_statistic_generator = ColumnNamesStatisticGenerator([]) self.range_statistic_generator = RangeStatisticGenerator(['Year']) self.unique_impression_statistic_generator = UniqueImpressionStatisticGenerator(['Country Code']) self.variance_statistic_generator", "\"1900-2020\", 4, 1978, 41, 1687.22, 6, 1, 16.67], [\"Test\"], [\"Columns\", \"Country Code -", "[\"Year, Country Code, Test\", 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_missing_data_report])) def test_given_statistic_object_with_variance_report_array_when_reports_statistic_then_return_data_set_name_with_normal_distribution_report( self):", "4], [\"ESP\", 5], [\"JPN\", 6]] 
data_frame_2_columns = ['Country Code', 'Test'] data_set_1_with_multiple_reports = StatisticReporterDataClass(", ") expected_return_value = [[\"Test\"], [\"Columns\", \"Country Code - Unique impressions\"], [\"Year, Country Code,", "test_given_empty_statistic_object_when_get_statistics_as_data_set_then_return_only_data_set_name_with_two_empty_arrays( self): empty_statistic_object = StatisticReporterDataClass(\"Test\", pd.DataFrame([]), []) expected_return_value = [[\"Test\"], [], []] self.assertEqual(expected_return_value,", "41, 1687.22, 6, 1, 16.67]] self.assertEqual(expected_return_value, self.data_set_statistic_reporter.get_statistics_as_data_set( [data_set_with_multiple_reports])) def test_given_two_statistic_object_with_multiple_report_arrays_when_reports_statistic_then_return_data_set_name_with_required_statistics( self): data_frame_1_columns, data_frame_1_values", "\"Test - Total number of data\", \"Test - Total number of missing data\"," ]