`ngram` column (list lengths 0 to 82k); each of the three sample rows below corresponds to one source snippet:
```python
# Beecrowd/URI-style tally: coelho = rabbit, rato = rat, sapo = frog;
# contador counts all valid test animals across n readings.
n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
    q, t = input().split(' ')
    t = t.upper()
    q = int(q)
    if 1 <= q <= 15:  # only quantities from 1 to 15 are counted
        contador += q
        if t == 'C':
            coelho += q
        elif t == 'R':
            rato += q
        elif t == 'S':
            sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
```
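A quick worked example, with input values chosen here purely for illustration: three readings of 10 rabbits, 6 rats, and 15 frogs give 31 valid animals, and each percentage is that type's share of 31, rounded to two decimals.

```text
Input        Output
3            Total: 31 cobaias
10 C         Total de coelhos: 10
6 R          Total de ratos: 6
15 S         Total de sapos: 15
             Percentual de coelhos: 32.26 %
             Percentual de ratos: 19.35 %
             Percentual de sapos: 48.39 %
```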
```python
# coding: utf-8

# Copyright (c) 2015 <NAME> <<EMAIL>>
# License: MIT (see LICENSE file)

from kivy.uix.boxlayout import BoxLayout

from gravur.common.labelbox import LabelBox  # NOQA
from gravur.utils import load_widget


@load_widget
class AmountInput(BoxLayout):
    pass
```
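A minimal usage sketch, assuming gravur is installed, that `@load_widget` wires up the widget's kv layout at class creation, and a hypothetical module path for the import; none of this is confirmed by the snippet above.

```python
from kivy.app import App

from gravur.common.amountinput import AmountInput  # hypothetical module path


class DemoApp(App):
    def build(self):
        # AmountInput subclasses BoxLayout, so it can serve as the root widget.
        return AmountInput()


if __name__ == '__main__':
    DemoApp().run()
```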
```python
import datetime


class Top(object):
    """class TopSeries stores the information of a series of top commands

    ::

        echo -n 'Timestamp: '; date +%F-%H:%M:%S
        top -b -n1 [-i] [-c]

    over short intervals to monitor the same set of processes over time.

    An example input content looks like below, or a series of these.
    The initialization works at any time interval and can be used as a generic class,
    but the class is designed for the output of the regular top commands above,
    run at about 1-minute intervals, which is performed by awsf3 on an AWSEM instance
    through cron jobs. (Some can be skipped, but there should be no more than 1 per
    minute.) This top output can be obtained through ``tibanna log -j <job_id> -t``
    or through API ``API().log(job_id=<job_id>, top=True)``.

    ::

        Timestamp: 2020-12-18-18:55:37
        top - 18:55:37 up 4 days,  2:37,  0 users,  load average: 5.59, 5.28, 5.76
        Tasks:   7 total,   1 running,   6 sleeping,   0 stopped,   0 zombie
        %Cpu(s):  6.6 us,  0.1 sy,  0.0 ni, 93.2 id,  0.0 wa,  0.0 hi,  0.0 si,  0.0 st
        KiB Mem : 12971188+total, 10379019+free, 20613644 used,  5308056 buff/cache
        KiB Swap:        0 total,        0 free,        0 used. 10834606+avail Mem

          PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND
          712 root      20   0 36.464g 8.223g  19572 S 100.0  6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic
        17919 ubuntu    20   0   40676   3828   3144 R   6.2  0.0   0:00.01 top -b -n1 -c -i -w 10000

    The default timestamp from top output does not contain dates, which can screw up
    multi-day processes, which are common in bioinformatics pipelines. So, an extra
    timestamp is added before each top command.

    To parse top output content, simply create an object. This will create the
    processes attribute, which is a raw parsed result organized by time stamps.

    ::

        top = Top(top_output_content)

    To reorganize the contents by commands, run digest. By default, the max number of
    commands is 16, and if there are more than 16 unique commands, they will be
    collapsed into prefixes.

    ::

        top.digest()

    To write a csv / tsv file organized by both timestamps (rows) and commands
    (columns), use :func: write_to_csv.

    ::

        top.write_to_csv(...)
    """

    # assume this format for timestamp
    timestamp_format = '%Y-%m-%d-%H:%M:%S'

    # These commands are excluded when parsing the top output
    # Currently only 1-, 2- or 3-word prefixes work.
    exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',
                    'docker-untar', 'containerd', 'goofys-latest', 'cwltool',
                    '/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',
                    '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',
                    '/usr/bin/python3 /bin/unattended-upgrade',
                    '/usr/bin/python3 /usr/local/bin/awsf3',
                    '/usr/bin/python3 /usr/local/bin/aws s3',
                    'java -jar /usr/local/bin/cromwell.jar',
                    'java -jar /usr/local/bin/cromwell-35.jar']

    def __init__(self, contents):
        """Initialization parses top output content and creates the processes
        attribute, which is a dictionary with timestamps as keys and a list of
        Process class objects as a value. It also creates empty attributes
        timestamps, commands, cpus and mems which can be filled through method
        :func: digest.
        """
        self.processes = dict()
        self.timestamps = []
        self.commands = []
        self.cpus = dict()
        self.mems = dict()
        self.parse_contents(contents)

    def parse_contents(self, contents):
        is_in_table = False
        for line in contents.splitlines():
            if line.startswith('Timestamp:'):
                timestamp = line.split()[1]
                continue
            if line.lstrip().startswith('PID'):
                is_in_table = True
                continue
            if not line or line.isspace():
                is_in_table = False
            if is_in_table:
                if timestamp not in self.processes:
                    self.processes[timestamp] = []
                process = Process(line)
                if not self.should_skip_process(process):
                    self.processes[timestamp].append(Process(line))

    def digest(self, max_n_commands=16, sort_by='alphabetical'):
        """Fills in timestamps, commands, cpus and mems attributes from the
        processes attribute.
        :param max_n_commands: When the number of unique commands exceeds this
        value, they are collapsed into unique prefixes.
        :sort_by: alphabetical|cpu|mem
        The commands are by default sorted alphabetically, but optionally can be
        sorted by total cpus or total mem (in reverse order)
        (e.g. the first command consumed the most cpu)
        """
        # Reinitialize these so that you get the same results if you run it twice
        self.timestamps = []
        self.commands = []
        self.cpus = dict()
        self.mems = dict()
        # First fill in commands from commands in processes (and collapse if needed.)
        self.commands = self.get_collapsed_commands(max_n_commands)
        # Fill in timestamps, cpus and mems from processes, matching collapsed commands.
        self.nTimepoints = len(self.processes)
        timestamp_ind = 0
        for timestamp in sorted(self.processes):  # sorted timestamps (columns)
            self.timestamps.append(timestamp)
            # commands (rows)
            for process in self.processes[timestamp]:
                # find a matching collapsed command (i.e. command prefix) and use that as command.
                command = Top.convert_command_to_collapsed_command(process.command, self.commands)
                if command not in self.cpus:
                    self.cpus[command] = [0] * self.nTimepoints
                    self.mems[command] = [0] * self.nTimepoints
                self.cpus[command][timestamp_ind] += process.cpu
                self.mems[command][timestamp_ind] += process.mem
            timestamp_ind += 1
        # sort commands according to total cpu
        self.sort_commands(by=sort_by)

    def get_collapsed_commands(self, max_n_commands):
        """If the number of commands exceeds max_n_commands, return a collapsed set
        of commands that consists of prefixes of commands so that the total number
        is within max_n_commands.
        First decide the number of words from the beginning of the commands to
        collapse commands that start with the same words, i.e. find the maximum
        number of words that makes the number of unique commands to be bounded by
        max_n_commands.
        If using only the first word is not sufficient, go down to the characters
        of the first word. If that's still not sufficient, collapse all of them
        into a single command ('all_commands').
        After the collapse, commands that are unique to a collapsed prefix are
        extended back to the original command.
        """
        all_commands = set()
        for timestamp in self.processes:
            all_commands.update(set([pr.command for pr in self.processes[timestamp]]))

        if len(all_commands) <= max_n_commands:
            # no need to collapse
            return list(all_commands)

        # decide the number of words from the beginning of the commands
        # to collapse commands starting with the same words
        all_cmd_lengths = [len(cmd.split()) for cmd in all_commands]  # number of words per command
        max_cmd_length = max(all_cmd_lengths)
        min_cmd_length = min(all_cmd_lengths)
        collapsed_len = max_cmd_length - 1
        n_commands = len(all_commands)
        while n_commands > max_n_commands and collapsed_len > 1:
            reduced_commands = set()
            for cmd in all_commands:
                reduced_commands.add(Top.first_words(cmd, collapsed_len))
            n_commands = len(reduced_commands)
            collapsed_len -= 1

        # went down to the first words but still too many commands - start splitting characters then
        if n_commands > max_n_commands:
            all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands]  # number of characters of the first word
            max_cmd_length = max(all_cmd_lengths)
            min_cmd_length = min(all_cmd_lengths)
            collapsed_len = max_cmd_length - 1
            while n_commands > max_n_commands and collapsed_len > 1:
                reduced_commands = set()
                for cmd in all_commands:
                    reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len))
                n_commands = len(reduced_commands)
                collapsed_len -= 1

        if n_commands > max_n_commands:
            return ['all_commands']
        else:
            # extend reduced commands that don't need to be reduced
            for r_cmd in list(reduced_commands):  # wrap in list so that we can remove elements in the loop
                uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
                if len(uniq_cmds) == 1:
                    reduced_commands.remove(r_cmd)
                    reduced_commands.add(uniq_cmds[0])
            return reduced_commands

    def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints',
                     timestamp_start=None, timestamp_end=None, base=0):
        """write metrics as csv file with commands as columns
        :param metric: 'cpu' or 'mem'
        :param delimiter: default ','
        :param colname_for_timestamps: column name for the timepoint column (1st column).
        default 'timepoints'
        :param timestamp_start: start time in the same timestamp format (e.g. 01:23:45),
        time stamps will be converted to minutes since start time. The report starts
        with minute 0. Time points with no top records will be filled with 0. If not
        specified, the first timestamp in the top commands will be used.
        :param timestamp_end: end time in the same timestamp format (e.g. 01:23:45),
        the reports will be generated only up to the end time. Time points with no top
        records will be filled with 0. If not specified, the last timestamp in the top
        commands will be used.
        :param base: default 0. If 0, minutes start with 0, if 1, minutes are 1-based
        (shifted by 1).
        """
        metric_array = getattr(self, metric + 's')
        if self.timestamps:
            if not timestamp_start:
                timestamp_start = self.timestamps[0]
            if not timestamp_end:
                timestamp_end = self.timestamps[-1]
            timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)
            last_minute = self.as_minutes(timestamp_end, timestamp_start)
        else:  # default when timestamps is not available (empty object)
            timestamps_as_minutes = range(0, 5)
            last_minute = 5
        with open(csv_file, 'w') as fo:
            # header
            # we have to escape any double quotes that are present in the cmd, before
            # wrapping it in double quotes. Otherwise we will get incorrect column
            # counts when creating the metrics report.
            fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('"', '""')) for cmd in self.commands]))
            fo.write('\n')
            # contents
            # skip timepoints earlier than timestamp_start
            for i in range(0, len(timestamps_as_minutes)):
                if timestamps_as_minutes[i] >= 0:
                    break
            for clock in range(0, last_minute + 1):
                clock_shifted = clock + base
                if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:
                    fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))
                    i += 1
                else:
                    fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands]))  # add 0 for timepoints not reported
                fo.write('\n')

    def should_skip_process(self, process):
        """A predicate function to check if the process should be skipped (excluded).
        It returns True if the input process should be skipped,
        e.g. the top command itself is excluded, as well as docker, awsf3, cwltool, etc.
        The list to be excluded is in self.exclude_list.
        It compares either the first word or the first two or three words only.
        Kernel threads (single-word commands wrapped in brackets, e.g. [perl]) are
        also excluded.
        """
        first_word = Top.first_words(process.command, 1)
        first_two_words = Top.first_words(process.command, 2)
        first_three_words = Top.first_words(process.command, 3)
        if first_word in self.exclude_list:
            return True
        elif first_two_words in self.exclude_list:
            return True
        elif first_three_words in self.exclude_list:
            return True
        if first_word.startswith('[') and first_word.endswith(']'):
            return True
        return False

    @staticmethod
    def convert_command_to_collapsed_command(cmd, collapsed_commands):
        if collapsed_commands == 'all_commands':  # collapsed to one command
            return 'all_commands'
        elif cmd in collapsed_commands:  # not collapsed
            return cmd
        else:  # collapsed to prefix
            all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]
            longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]
            return longest_prefix

    def total_cpu_per_command(self, command):
        return sum([v for v in self.cpus[command]])

    def total_mem_per_command(self, command):
        return sum([v for v in self.mems[command]])

    def sort_commands(self, by='cpu'):
        """sort self.commands by total cpu (default) or mem in reverse order,
        or alphabetically (by='alphabetical')"""
        if by == 'cpu':
            self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)
        elif by == 'mem':
            self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)
        elif by == 'alphabetical':
            self.commands = sorted(self.commands)

    @classmethod
    def as_minutes(cls, timestamp, timestamp_start):
        """timestamp as minutes since timestamp_start
        :param timestamp: given timestamp in the same format (e.g. 01:23:45)
        :param timestamp_start: start timestamp in the same format (e.g. 01:20:45)
        In the above example, 3 will be the return value.
        """
        dt = cls.as_datetime(timestamp)
        dt_start = cls.as_datetime(timestamp_start)
        # negative numbers are not supported by timedelta, so do each case separately
        if dt > dt_start:
            return round((dt - dt_start).seconds / 60)
        else:
            return -round((dt_start - dt).seconds / 60)

    def timestamps_as_minutes(self, timestamp_start):
        """convert self.timestamps to a list of minutes since timestamp_start
        :param timestamp_start: timestamp in the same format (e.g. 01:23:45)
        """
        return [self.as_minutes(t, timestamp_start) for t in self.timestamps]

    @classmethod
    def as_datetime(cls, timestamp):
        return datetime.datetime.strptime(timestamp, cls.timestamp_format)

    @staticmethod
    def wrap_in_double_quotes(string):
        """wrap a given string with double quotes (e.g. haha -> "haha")
        """
        return '\"' + string + '\"'

    @staticmethod
    def first_words(string, n_words):
        """returns first n words of a string
        e.g. first_words('abc def ghi', 2) ==> 'abc def'
        """
        words = string.split()
        return ' '.join(words[0:min(n_words, len(words))])

    @staticmethod
    def first_characters(string, n_letters):
        """returns first n letters of a string
        e.g. first_characters('abc def ghi', 2) ==> 'ab'
        """
        letters = list(string)
        return ''.join(letters[0:min(n_letters, len(letters))])

    def as_dict(self):
        return self.__dict__


class Process(object):
    def __init__(self, top_line):
        prinfo_as_list = top_line.lstrip().split()
        self.pid = prinfo_as_list[0]
        self.user = prinfo_as_list[1]
        self.cpu = float(prinfo_as_list[8])
        self.mem = float(prinfo_as_list[9])
        self.command = ' '.join(prinfo_as_list[11:])

    def as_dict(self):
        return self.__dict__
```
"collapsed_commands: # not collapsed return cmd else: # collapsed to",
"also creates empty attributes timestamps, commands, cpus and mems which",
"cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) # negative numbers are not supported",
"into a single command ('all_commands') After the collapse, commands that",
"number of words that makes the number of unique commands",
"content looks like below, or a series of these. The",
"same results if you run it twice self.timestamps = []",
"commands according to total cpu self.sort_commands(by=sort_by) def get_collapsed_commands(self, max_n_commands): \"\"\"If",
"- dt).seconds / 60) def timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps to",
"be used. :param base: default 0. If 0, minutes start",
"be used. :param timestamp_end: end time in the same timestamp",
"timestamp_start for i in range(0, len(timestamps_as_minutes)): if timestamps_as_minutes[i] >= 0:",
"obtained through ``tibanna log -j <job_id> -t`` or through API",
"0 free, 0 used. 10834606+avail Mem PID USER PR NI",
"you get the same results if you run it twice",
"as_datetime(cls, timestamp): return datetime.datetime.strptime(timestamp, cls.timestamp_format) @staticmethod def wrap_in_double_quotes(string): \"\"\"wrap a",
"for cmd in all_commands] # number of words per command",
"in the cmd, before wrapping it in double quotes. Otherwise",
"as minutes since timestamp_start. :param timestamp: given timestamp in the",
"n_commands > max_n_commands: return ['all_commands'] else: # extend reduced commands",
"total cpu (default) or mem in reverse order, or alphabetically",
"by='cpu'): \"\"\"sort self.commands by total cpu (default) or mem in",
"\"\"\" letters = list(string) return ''.join(letters[0:min(n_letters, len(letters))]) def as_dict(self): return",
"only up to the end time. Time points with no",
"01:23:45), The reports will be generated only up to the",
"to the original command. \"\"\" all_commands = set() for timestamp",
"collapsed return cmd else: # collapsed to prefix all_prefixes =",
"/usr/local/bin/aws s3', 'java -jar /usr/local/bin/cromwell.jar', 'java -jar /usr/local/bin/cromwell-35.jar'] def __init__(self,",
"the same format (e.g. 01:20:45) In the above example, 3",
"01:23:45) \"\"\" return [self.as_minutes(t, timestamp_start) for t in self.timestamps] @classmethod",
"commands will be used. :param base: default 0. If 0,",
"clock in range(0, last_minute + 1): clock_shifted = clock +",
"5308056 buff/cache KiB Swap: 0 total, 0 free, 0 used.",
"to be excluded is in self.exclude_list. It compares either first",
"from processes, matching collapsed commands. self.nTimepoints = len(self.processes) timestamp_ind =",
"\"\"\" # assume this format for timestamp timestamp_format = '%Y-%m-%d-%H:%M:%S'",
"in processes (and collapse if needed.) self.commands = self.get_collapsed_commands(max_n_commands) #",
"2) ==> 'abc def' \"\"\" words = string.split() return '",
"a collapsed prefix are extended back to the original command.",
"'containerd', 'goofys-latest', 'cwltool', '/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval', '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',",
"default timestamp from top output does not contain dates, which",
"in self.commands])) fo.write('\\n') # contents # skip timepoints earlier than",
"return True return False @staticmethod def convert_command_to_collapsed_command(cmd, collapsed_commands): if collapsed_commands",
"process.cpu self.mems[command][timestamp_ind] += process.mem timestamp_ind += 1 # sort commands",
"= [_ for _ in collapsed_commands if cmd.startswith(_)] longest_prefix =",
"r_cmd in list(reduced_commands): # wrap in list so that we",
"same format (e.g. 01:20:45) In the above example, 3 will",
"1 if n_commands > max_n_commands: return ['all_commands'] else: # extend",
"assume this format for timestamp timestamp_format = '%Y-%m-%d-%H:%M:%S' # These",
"reported fo.write('\\n') def should_skip_process(self, process): \"\"\"A predicate function to check",
"1): reduced_commands = set() for cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len))",
"6.6 us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa,",
"+ string + '\\\"' @staticmethod def first_words(string, n_words): \"\"\"returns first",
"more than 1 per minute). This top output can be",
"as csv file with commands as columns :param metric: 'cpu'",
"to the end time. Time points with no top records",
"timedelta, so do each case separately if dt > dt_start:",
"In the above example, 3 will be the return value.",
"def first_characters(string, n_letters): \"\"\"returns first n letters of a string",
"collapse return list(all_commands) # decide the number of words from",
"[cmd for cmd in all_commands if cmd.startswith(r_cmd)] if len(uniq_cmds) ==",
"\"\"\" return '\\\"' + string + '\\\"' @staticmethod def first_words(string,",
"timestamp_start: start time in the same timestamp format (e.g. 01:23:45),",
"since timestamp_start. :param timestamp: given timestamp in the same format",
"example, 3 will be the return value. \"\"\" dt =",
"-> \"haha\") \"\"\" return '\\\"' + string + '\\\"' @staticmethod",
"else: # extend reduced commands that don't need to be",
"base=0): \"\"\"write metrics as csv file with commands as columns",
"in self.exclude_list: return True if first_word.startswith('[') and first_word.endswith(']'): return True",
"command return 'all_commands' elif cmd in collapsed_commands: # not collapsed",
"total cpu self.sort_commands(by=sort_by) def get_collapsed_commands(self, max_n_commands): \"\"\"If the number of",
"any double quotes that are present in the cmd, before",
"Fill in timestamps, cpus and mems from processes, matching collapsed",
"= float(prinfo_as_list[9]) self.command = ' '.join(prinfo_as_list[11:]) def as_dict(self): return self.__dict__",
"commands (rows) for process in self.processes[timestamp]: # find a matching",
"== 'mem': self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True) elif",
"Top.first_words(process.command, 1) first_two_words = Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command, 3)",
"datetime class Top(object): \"\"\"class TopSeries stores the information of a",
"top records will be filled with 0. If not specified,",
"by == 'mem': self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)",
"# negative numbers are not supported by timedelta, so do",
"total cpus or total mem (in reverser order) (e.g. the",
"separately if dt > dt_start: return round((dt - dt_start).seconds /",
"self.pid = prinfo_as_list[0] self.user = prinfo_as_list[1] self.cpu = float(prinfo_as_list[8]) self.mem",
"return reduced_commands def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints', timestamp_start=None, timestamp_end=None,",
"return value. \"\"\" dt = cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) #",
"will get incorrect column counts when creating the metrics report.",
"reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands = len(reduced_commands) collapsed_len -= 1 # went",
"delimiter=',', colname_for_timestamps='timepoints', timestamp_start=None, timestamp_end=None, base=0): \"\"\"write metrics as csv file",
"up to the end time. Time points with no top",
"0:00.01 top -b -n1 -c -i -w 10000 The default",
"reduced_commands = set() for cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands",
"a dictionary with timestamps as keys and a list of",
"as well as docker, awsf3, cwltool, etc. the list to",
"(columns), use :func: write_to_csv. :: top.write_to_csv(...) \"\"\" # assume this",
"only 1-, 2- or 3-word prefixes work. exclude_list = ['top',",
":param max_n_commands: When the number of unique commands exceeds this",
"used. :param timestamp_end: end time in the same timestamp format",
"the class is designed for the output of a regular",
"= sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True) elif by == 'mem':",
"the number of unique commands exceeds this value, they are",
"Process(object): def __init__(self, top_line): prinfo_as_list = top_line.lstrip().split() self.pid = prinfo_as_list[0]",
"sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True) elif by == 'alphabetical': self.commands",
"3144 R 6.2 0.0 0:00.01 top -b -n1 -c -i",
"self.nTimepoints = len(self.processes) timestamp_ind = 0 for timestamp in sorted(self.processes):",
"First fill in commands from commands in processes (and collapse",
"command): return sum([v for v in self.cpus[command]]) def total_mem_per_command(self, command):",
"2) ==> 'ab' \"\"\" letters = list(string) return ''.join(letters[0:min(n_letters, len(letters))])",
"\"\"\" all_commands = set() for timestamp in self.processes: all_commands.update(set([pr.command for",
"letters of a string e.g. first_characters('abc def ghi', 2) ==>",
"dates, which can screw up multi-day processes which is common",
"else: # collapsed to prefix all_prefixes = [_ for _",
"Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command, 3) if first_word in self.exclude_list:",
"\"\"\" metric_array = getattr(self, metric + 's') if self.timestamps: if",
"20613644 used, 5308056 buff/cache KiB Swap: 0 total, 0 free,",
"by time stamps. :: top = Top(top_output_content) To reorganize the",
"first words but still too many commands - start splitting",
"of words that makes the number of unique commands to",
"def ghi', 2) ==> 'abc def' \"\"\" words = string.split()",
"if you run it twice self.timestamps = [] self.commands =",
"same timestamp format (e.g. 01:23:45), The reports will be generated",
"all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] # number of",
"with the same words, i.e. find the maximum number of",
"default when timestamps is not available (empty object) timestamps_as_minutes =",
"be skipped but there should be no more than 1",
"e.g. first_characters('abc def ghi', 2) ==> 'ab' \"\"\" letters =",
"timestamp_start. :param timestamp: given timestamp in the same format (e.g.",
"commands exceeds this value, they are collapsed into unique prefixes.",
"[Top.wrap_in_double_quotes(cmd.replace('\"', '\"\"')) for cmd in self.commands])) fo.write('\\n') # contents #",
"1): clock_shifted = clock + base if i < len(timestamps_as_minutes)",
"regular top commands above run at about 1-minute intervals, which",
"self.mems[command]]) def sort_commands(self, by='cpu'): \"\"\"sort self.commands by total cpu (default)",
"file with commands as columns :param metric: 'cpu' or 'mem'",
"set of commands that consists of prefixes of commands so",
"since start time. The report starts with minute 0. Time",
"of a regular top commands above run at about 1-minute",
"can remove elements in the loop uniq_cmds = [cmd for",
"cpus and mems attributes from processes attribute. :param max_n_commands: When",
"colunm name for the timepoint column (1st column). default 'timepoints'",
":param metric: 'cpu' or 'mem' :param delimiter: default ',' :param",
"from the beginning of the commands to collapse commands that",
"per minute). This top output can be obtained through ``tibanna",
"top output can be obtained through ``tibanna log -j <job_id>",
"0. If 0, minutes start with 0, if 1, minutes",
"content and creates processes which is a dictionary with timestamps",
"attributes from processes attribute. :param max_n_commands: When the number of",
"If not specified, the first timestamp in the top commands",
"\"\"\"initialization parsed top output content and creates processes which is",
"if command not in self.cpus: self.cpus[command] = [0] * self.nTimepoints",
"by == 'cpu': self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)",
"5 with open(csv_file, 'w') as fo: # header # we",
"collapsed_commands == 'all_commands': # collapsed to one command return 'all_commands'",
"'nodejs --eval', '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim', '/usr/bin/python3 /bin/unattended-upgrade', '/usr/bin/python3 /usr/local/bin/awsf3', '/usr/bin/python3",
"with 0. If not specified, the last timestamp in the",
"process should be skipped. e.g. the top command itself is",
"elif by == 'alphabetical': self.commands = sorted(self.commands) @classmethod def as_minutes(cls,",
"an object. This will create processes attribute, which is a",
"will be converted to minutes since start time. The report",
"etc. the list to be excluded is in self.exclude_list. It",
"(e.g. 01:23:45) \"\"\" return [self.as_minutes(t, timestamp_start) for t in self.timestamps]",
"not timestamp_end: timestamp_end = self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute =",
"above run at about 1-minute intervals, which is performed by",
"def __init__(self, top_line): prinfo_as_list = top_line.lstrip().split() self.pid = prinfo_as_list[0] self.user",
"letters = list(string) return ''.join(letters[0:min(n_letters, len(letters))]) def as_dict(self): return self.__dict__",
"with the same words all_cmd_lengths = [len(cmd.split()) for cmd in",
"timepoints not reported fo.write('\\n') def should_skip_process(self, process): \"\"\"A predicate function",
"<filename>tibanna/top.py<gh_stars>10-100 import datetime class Top(object): \"\"\"class TopSeries stores the information",
"* self.nTimepoints self.cpus[command][timestamp_ind] += process.cpu self.mems[command][timestamp_ind] += process.mem timestamp_ind +=",
"cmd else: # collapsed to prefix all_prefixes = [_ for",
"one command return 'all_commands' elif cmd in collapsed_commands: # not",
"using only the first word is not sufficient, go down",
"same timestamp format (e.g. 01:23:45), time stamps will be converted",
"went down to the first words but still too many",
"counts when creating the metrics report. fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('\"', '\"\"'))",
"collapsed into unique prefixes. :sort_by: alphabetical|cpu|mem The commands are by",
"per command max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len =",
"True if the input process should be skipped. e.g. the",
"0 stopped, 0 zombie %Cpu(s): 6.6 us, 0.1 sy, 0.0",
"minutes since timestamp_start. :param timestamp: given timestamp in the same",
"command. \"\"\" all_commands = set() for timestamp in self.processes: all_commands.update(set([pr.command",
"organized by both timestamps (rows) and commands (columns), use :func:",
"exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron', 'docker-untar', 'containerd', 'goofys-latest',",
"self.timestamps to a list of minutes since timestamp_start :param timestamp_start:",
"a generic class, but the class is designed for the",
"timestamp_start) for t in self.timestamps] @classmethod def as_datetime(cls, timestamp): return",
"(e.g. 01:23:45), The reports will be generated only up to",
"contents): \"\"\"initialization parsed top output content and creates processes which",
"# number of characters of the first word max_cmd_length =",
"self.cpu = float(prinfo_as_list[8]) self.mem = float(prinfo_as_list[9]) self.command = ' '.join(prinfo_as_list[11:])",
"timestamps, commands, cpus and mems attributes from processes attribute. :param",
"2020-12-18-18:55:37 top - 18:55:37 up 4 days, 2:37, 0 users,",
"with double quotes (e.g. haha -> \"haha\") \"\"\" return '\\\"'",
"i += 1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in",
"and mems from processes, matching collapsed commands. self.nTimepoints = len(self.processes)",
"in collapsed_commands if cmd.startswith(_)] longest_prefix = sorted(all_prefixes, key=lambda x: len(x),",
"multi-day processes which is common for bioinformatics pipelines. So, an",
"If 0, minutes start with 0, if 1, minutes are",
"timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute = self.as_minutes(timestamp_end, timestamp_start) else: # default",
"# we have to escape any double quotes that are",
"down to the first words but still too many commands",
"function to check if the process should be skipped (excluded).",
"Top(object): \"\"\"class TopSeries stores the information of a series of",
"need to be reduced for r_cmd in list(reduced_commands): # wrap",
"at about 1-minute intervals, which is performed by awsf3 on",
"metrics as csv file with commands as columns :param metric:",
"also excluded. \"\"\" first_word = Top.first_words(process.command, 1) first_two_words = Top.first_words(process.command,",
"in reverse order, or alphabetically (by='alphabetical')\"\"\" if by == 'cpu':",
"19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar",
":: top.digest() To write a csv / tsv file organized",
"first_two_words = Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command, 3) if first_word",
"be reduced for r_cmd in list(reduced_commands): # wrap in list",
"These commands are excluded when parsing the top output #",
"time stamps will be converted to minutes since start time.",
"number of words from the beginning of the commands to",
"output does not contain dates, which can screw up multi-day",
"self.timestamps[0] if not timestamp_end: timestamp_end = self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)",
"parsing the top output # Currently only 1-, 2- or",
"in the top commands will be used. :param timestamp_end: end",
"max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 n_commands",
"= Top(top_output_content) To reorganize the contents by commands, run digest.",
"as a value. It also creates empty attributes timestamps, commands,",
"set() for cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands = len(reduced_commands)",
"series of top commands :: echo -n 'Timestamp: '; date",
"we can remove elements in the loop uniq_cmds = [cmd",
"collapsed_len)) n_commands = len(reduced_commands) collapsed_len -= 1 # went down",
"empty attributes timestamps, commands, cpus and mems which can be",
"# collapsed to prefix all_prefixes = [_ for _ in",
"self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute = self.as_minutes(timestamp_end, timestamp_start) else: #",
"when creating the metrics report. fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('\"', '\"\"')) for",
"will be filled with 0. If not specified, the first",
"sum([v for v in self.mems[command]]) def sort_commands(self, by='cpu'): \"\"\"sort self.commands",
"simply create an object. This will create processes attribute, which",
"will be used. :param timestamp_end: end time in the same",
"sort commands according to total cpu self.sort_commands(by=sort_by) def get_collapsed_commands(self, max_n_commands):",
"beginning of the commands to collapse commands that start with",
"that start with the same words, i.e. find the maximum",
"/ 60) def timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps to a list",
"the information of a series of top commands :: echo",
"each case separately if dt > dt_start: return round((dt -",
"+= process.mem timestamp_ind += 1 # sort commands according to",
"The reports will be generated only up to the end",
"collapsed command (i.e. command prefix) and use that as command.",
"timestamps, cpus and mems from processes, matching collapsed commands. self.nTimepoints",
"range(0, last_minute + 1): clock_shifted = clock + base if",
"in self.processes[timestamp]: # find a matching collapsed command (i.e. command",
"result organized by time stamps. :: top = Top(top_output_content) To",
"self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True) elif by ==",
"check if the process should be skipped (excluded). It returns",
"all_commands] # number of words per command max_cmd_length = max(all_cmd_lengths)",
"def parse_contents(self, contents): is_in_table = False for line in contents.splitlines():",
"in timestamps, cpus and mems from processes, matching collapsed commands.",
"= min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 while(n_commands > max_n_commands",
"not contain dates, which can screw up multi-day processes which",
"format (e.g. 01:23:45) :param timestamp_start: start timestamp in the same",
"self.exclude_list: return True if first_word.startswith('[') and first_word.endswith(']'): return True return",
"= 5 with open(csv_file, 'w') as fo: # header #",
"list so that we can remove elements in the loop",
"== 'cpu': self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True) elif",
"cmd in all_commands: reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len)) n_commands = len(reduced_commands) collapsed_len -=",
"should be skipped (excluded). It returns True if the input",
"self.exclude_list. It compares either first word or first two or",
"collapsed prefix are extended back to the original command. \"\"\"",
"Top(top_output_content) To reorganize the contents by commands, run digest. By",
"to collapse commands starting with the same words all_cmd_lengths =",
"base if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock: fo.write(delimiter.join([str(clock_shifted)]",
"minutes start with 0, if 1, minutes are 1-based (shifted",
"three words only. Kernel threads (single-word commands wrapped in bracket",
"characters of the first word. If that's still not sufficient,",
"timestamps_as_minutes = range(0, 5) last_minute = 5 with open(csv_file, 'w')",
"top output # Currently only 1-, 2- or 3-word prefixes",
"to be reduced for r_cmd in list(reduced_commands): # wrap in",
"of a string e.g. first_words('abc def ghi', 2) ==> 'abc",
"dt > dt_start: return round((dt - dt_start).seconds / 60) else:",
"(excluded). It returns True if the input process should be",
"- 18:55:37 up 4 days, 2:37, 0 users, load average:",
"than 16 unique commands, they will be collapsed into prefixes.",
"(e.g. the first command consumed the most cpu) \"\"\" #",
"bounded by max_n_commands. If using only the first word is",
"of processes over time. An example input content looks like",
"in timestamps, commands, cpus and mems attributes from processes attribute.",
"timestamp_ind += 1 # sort commands according to total cpu",
"which is a raw parsed result organized by time stamps.",
"organized by time stamps. :: top = Top(top_output_content) To reorganize",
"is 16, and if there are more than 16 unique",
"of minutes since timestamp_start :param timestamp_start: timestamp in the same",
"1 while(n_commands > max_n_commands and collapsed_len > 1): reduced_commands =",
"wa, 0.0 hi, 0.0 si, 0.0 st KiB Mem :",
"these. The initialization works at any time interval and can",
"= sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True) elif by == 'alphabetical':",
"maximum number of words that makes the number of unique",
"timestamp in the top commands will be used. :param timestamp_end:",
"top = Top(top_output_content) To reorganize the contents by commands, run",
"top output content and creates processes which is a dictionary",
"wrapped in bracket (e.g. [perl]) are also excluded. \"\"\" first_word",
"not available (empty object) timestamps_as_minutes = range(0, 5) last_minute =",
"collapse, commands that are unique to a collapsed prefix are",
"timestamp_end = self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute = self.as_minutes(timestamp_end, timestamp_start)",
"and timestamps_as_minutes[i] == clock: fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in",
"number of unique commands to be bounded by max_n_commands. If",
"reorganize the contents by commands, run digest. By default, the",
"minute). This top output can be obtained through ``tibanna log",
"either first word or first two or three words only.",
"are 1-based (shifted by 1). \"\"\" metric_array = getattr(self, metric",
"run digest. By default, the max number of commands is",
"< len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock: fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for",
"first_word.startswith('[') and first_word.endswith(']'): return True return False @staticmethod def convert_command_to_collapsed_command(cmd,",
"both timestamps (rows) and commands (columns), use :func: write_to_csv. ::",
"-n 'Timestamp: '; date +%F-%H:%M:%S top -b -n1 [-i] [-c]",
"unique commands to be bounded by max_n_commands. If using only",
"max number of commands is 16, and if there are",
"all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands = len(reduced_commands) collapsed_len -= 1 #",
"si, 0.0 st KiB Mem : 12971188+total, 10379019+free, 20613644 used,",
"from the beginning of the commands # to collapse commands",
"3 will be the return value. \"\"\" dt = cls.as_datetime(timestamp)",
"top -b -n1 -c -i -w 10000 The default timestamp",
"an AWSEM instance through cron jobs. (some can be skipped",
"0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g",
"commands, run digest. By default, the max number of commands",
"of unique commands to be bounded by max_n_commands. If using",
"to total cpu self.sort_commands(by=sort_by) def get_collapsed_commands(self, max_n_commands): \"\"\"If the number",
"used. 10834606+avail Mem PID USER PR NI VIRT RES SHR",
"words, i.e. find the maximum number of words that makes",
"the same words, i.e. find the maximum number of words",
"the top command itself is excluded, as well as docker,",
"prefixes work. exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron', 'docker-untar',",
"self.mems[command] = [0] * self.nTimepoints self.cpus[command][timestamp_ind] += process.cpu self.mems[command][timestamp_ind] +=",
"order, or alphabetically (by='alphabetical')\"\"\" if by == 'cpu': self.commands =",
"total number is within max_n_commands. First decide the number of",
"write a csv / tsv file organized by both timestamps",
"dt).seconds / 60) def timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps to a",
"# number of words per command max_cmd_length = max(all_cmd_lengths) min_cmd_length",
"to monitor the same set of processes over time. An",
"datetime.datetime.strptime(timestamp, cls.timestamp_format) @staticmethod def wrap_in_double_quotes(string): \"\"\"wrap a given string with",
"used as a generic class, but the class is designed",
"n_commands = len(reduced_commands) collapsed_len -= 1 if n_commands > max_n_commands:",
"self.timestamps: if not timestamp_start: timestamp_start = self.timestamps[0] if not timestamp_end:",
"clock_shifted = clock + base if i < len(timestamps_as_minutes) and",
"if cmd.startswith(_)] longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0] return",
"'alphabetical': self.commands = sorted(self.commands) @classmethod def as_minutes(cls, timestamp, timestamp_start): \"\"\"timestamp",
"if n_commands > max_n_commands: return ['all_commands'] else: # extend reduced",
"> max_n_commands and collapsed_len > 1): reduced_commands = set() for",
"command = Top.convert_command_to_collapsed_command(process.command, self.commands) if command not in self.cpus: self.cpus[command]",
"last timestamp in the top commands will be used. :param",
"points with no top records will be filled with 0.",
"list of Process class objects as a value. It also",
"The initialization works at any time interval and can be",
"the first command consumed the most cpu) \"\"\" # Reinitializat",
"with minute 0. Time points with no top records will",
"zombie %Cpu(s): 6.6 us, 0.1 sy, 0.0 ni, 93.2 id,",
"top output content, simply create an object. This will create",
"creating the metrics report. fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('\"', '\"\"')) for cmd",
"supported by timedelta, so do each case separately if dt",
"len(words))]) @staticmethod def first_characters(string, n_letters): \"\"\"returns first n letters of",
"with 0, if 1, minutes are 1-based (shifted by 1).",
"USER PR NI VIRT RES SHR S %CPU %MEM TIME+",
"['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron', 'docker-untar', 'containerd', 'goofys-latest', 'cwltool', '/usr/bin/containerd-shim-runc-v2',",
"to a collapsed prefix are extended back to the original",
"# to collapse commands starting with the same words all_cmd_lengths",
"ghi', 2) ==> 'ab' \"\"\" letters = list(string) return ''.join(letters[0:min(n_letters,",
"else: # default when timestamps is not available (empty object)",
"converted to minutes since start time. The report starts with",
"header # we have to escape any double quotes that",
"be used as a generic class, but the class is",
"of the first word. If that's still not sufficient, collapse",
"by default sorted alphabetically, but optionally can be sorted by",
"def get_collapsed_commands(self, max_n_commands): \"\"\"If the number of commands exceeds max_n_commands,",
"which is common for bioinformatics pipelines. So, an extra timestamp",
"a raw parsed result organized by time stamps. :: top",
"commands to collapse commands that start with the same words,",
"top commands above run at about 1-minute intervals, which is",
"So, an extra timestamp is added before each top command.",
"as_minutes(cls, timestamp, timestamp_start): \"\"\"timestamp as minutes since timestamp_start. :param timestamp:",
"timestamp_start): \"\"\"convert self.timestamps to a list of minutes since timestamp_start",
"def __init__(self, contents): \"\"\"initialization parsed top output content and creates",
"__init__(self, top_line): prinfo_as_list = top_line.lstrip().split() self.pid = prinfo_as_list[0] self.user =",
"juicer_tools.jar addNorm -w 1000 -d -F out.hic 17919 ubuntu 20",
"create processes attribute, which is a raw parsed result organized",
"word max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len = max_cmd_length",
"but the class is designed for the output of a",
"(single-word commands wrapped in bracket (e.g. [perl]) are also excluded.",
"''.join(letters[0:min(n_letters, len(letters))]) def as_dict(self): return self.__dict__ class Process(object): def __init__(self,",
"commands above run at about 1-minute intervals, which is performed",
"within max_n_commands. First decide the number of words from the",
"if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock: fo.write(delimiter.join([str(clock_shifted)] +",
"to check if the process should be skipped (excluded). It",
"'s') if self.timestamps: if not timestamp_start: timestamp_start = self.timestamps[0] if",
"TopSeries stores the information of a series of top commands",
"for _ in collapsed_commands if cmd.startswith(_)] longest_prefix = sorted(all_prefixes, key=lambda",
"only the first word is not sufficient, go down to",
"100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w",
"[self.as_minutes(t, timestamp_start) for t in self.timestamps] @classmethod def as_datetime(cls, timestamp):",
"self.commands])) i += 1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd",
"/usr/local/bin/cromwell.jar', 'java -jar /usr/local/bin/cromwell-35.jar'] def __init__(self, contents): \"\"\"initialization parsed top",
"start timestamp in the same format (e.g. 01:20:45) In the",
"-n1 -c -i -w 10000 The default timestamp from top",
"which is a dictionary with timestamps as keys and a",
"(columns) self.timestamps.append(timestamp) # commands (rows) for process in self.processes[timestamp]: #",
"compares either first word or first two or three words",
"= getattr(self, metric + 's') if self.timestamps: if not timestamp_start:",
"which can screw up multi-day processes which is common for",
"dt = cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) # negative numbers are",
"KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache KiB",
"loop uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]",
"commands that consists of prefixes of commands so that the",
"= False for line in contents.splitlines(): if line.startswith('Timestamp:'): timestamp =",
"R 6.2 0.0 0:00.01 top -b -n1 -c -i -w",
"first_word.endswith(']'): return True return False @staticmethod def convert_command_to_collapsed_command(cmd, collapsed_commands): if",
"== 'all_commands': # collapsed to one command return 'all_commands' elif",
"load average: 5.59, 5.28, 5.76 Tasks: 7 total, 1 running,",
"value. It also creates empty attributes timestamps, commands, cpus and",
"collapsed_commands): if collapsed_commands == 'all_commands': # collapsed to one command",
"longest_prefix def total_cpu_per_command(self, command): return sum([v for v in self.cpus[command]])",
"input content looks like below, or a series of these.",
"Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache KiB Swap:",
"as fo: # header # we have to escape any",
"is not sufficient, go down to the characters of the",
"alphabetically, but optionally can be sorted by total cpus or",
"timestamp): return datetime.datetime.strptime(timestamp, cls.timestamp_format) @staticmethod def wrap_in_double_quotes(string): \"\"\"wrap a given",
"line.startswith('Timestamp:'): timestamp = line.split()[1] continue if line.lstrip().startswith('PID'): is_in_table = True",
"n_commands = len(all_commands) while(n_commands > max_n_commands and collapsed_len > 1):",
"for cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands = len(reduced_commands) collapsed_len",
"monitor the same set of processes over time. An example",
"Otherwise we # will get incorrect column counts when creating",
"self.mems[command][timestamp_ind] += process.mem timestamp_ind += 1 # sort commands according",
"max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 while(n_commands",
"list to be excluded is in self.exclude_list. It compares either",
"echo -n 'Timestamp: '; date +%F-%H:%M:%S top -b -n1 [-i]",
"s3', 'java -jar /usr/local/bin/cromwell.jar', 'java -jar /usr/local/bin/cromwell-35.jar'] def __init__(self, contents):",
"time in the same timestamp format (e.g. 01:23:45), time stamps",
"parsed top output content and creates processes which is a",
"timestamp_ind = 0 for timestamp in sorted(self.processes): # sorted timestamps",
"minute 0. Time points with no top records will be",
"of top commands :: echo -n 'Timestamp: '; date +%F-%H:%M:%S",
"tsv file organized by both timestamps (rows) and commands (columns),",
"36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g",
"reverser order) (e.g. the first command consumed the most cpu)",
"-F out.hic 17919 ubuntu 20 0 40676 3828 3144 R",
"key=lambda x: len(x), reverse=True)[0] return longest_prefix def total_cpu_per_command(self, command): return",
"stores the information of a series of top commands ::",
"prefix all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]",
"# skip timepoints earlier than timestamp_start for i in range(0,",
"+ base if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:",
"twice self.timestamps = [] self.commands = [] self.cpus = dict()",
"starting with the same words all_cmd_lengths = [len(cmd.split()) for cmd",
"number of commands exceeds max_n_commands, return a collapsed set of",
"# Fill in timestamps, cpus and mems from processes, matching",
"by timedelta, so do each case separately if dt >",
"first word is not sufficient, go down to the characters",
"self.commands = [] self.cpus = dict() self.mems = dict() #",
"skipped but there should be no more than 1 per",
"KiB Swap: 0 total, 0 free, 0 used. 10834606+avail Mem",
"def as_dict(self): return self.__dict__ class Process(object): def __init__(self, top_line): prinfo_as_list",
"prinfo_as_list[1] self.cpu = float(prinfo_as_list[8]) self.mem = float(prinfo_as_list[9]) self.command = '",
"delimiter: default ',' :param colname_for_timestamps: colunm name for the timepoint",
"sort_by='alphabetical'): \"\"\"Fills in timestamps, commands, cpus and mems attributes from",
"cmd in collapsed_commands: # not collapsed return cmd else: #",
"reduced commands that don't need to be reduced for r_cmd",
"'mem': self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True) elif by",
"command itself is excluded, as well as docker, awsf3, cwltool,",
"timestamp_format = '%Y-%m-%d-%H:%M:%S' # These commands are excluded when parsing",
"string.split() return ' '.join(words[0:min(n_words, len(words))]) @staticmethod def first_characters(string, n_letters): \"\"\"returns",
"all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)] longest_prefix",
"generic class, but the class is designed for the output",
"creates processes which is a dictionary with timestamps as keys",
":param base: default 0. If 0, minutes start with 0,",
"filled through method :func: digest. \"\"\" self.processes = dict() self.timestamps",
"top.write_to_csv(...) \"\"\" # assume this format for timestamp timestamp_format =",
"= dict() self.timestamps = [] self.commands = [] self.cpus =",
"results if you run it twice self.timestamps = [] self.commands",
"self.processes: self.processes[timestamp] = [] process = Process(line) if not self.should_skip_process(process):",
"= len(reduced_commands) collapsed_len -= 1 # went down to the",
"up multi-day processes which is common for bioinformatics pipelines. So,",
"below, or a series of these. The initialization works at",
"--eval', '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim', '/usr/bin/python3 /bin/unattended-upgrade', '/usr/bin/python3 /usr/local/bin/awsf3', '/usr/bin/python3 /usr/local/bin/aws",
"10834606+avail Mem PID USER PR NI VIRT RES SHR S",
"contain dates, which can screw up multi-day processes which is",
"use that as command. command = Top.convert_command_to_collapsed_command(process.command, self.commands) if command",
"self.mems = dict() self.parse_contents(contents) def parse_contents(self, contents): is_in_table = False",
"== clock: fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands])) i",
"93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st",
"by 1). \"\"\" metric_array = getattr(self, metric + 's') if",
"do each case separately if dt > dt_start: return round((dt",
"-jar /usr/local/bin/cromwell-35.jar'] def __init__(self, contents): \"\"\"initialization parsed top output content",
"self.get_collapsed_commands(max_n_commands) # Fill in timestamps, cpus and mems from processes,",
"self.commands])) # add 0 for timepoints not reported fo.write('\\n') def",
"Top.convert_command_to_collapsed_command(process.command, self.commands) if command not in self.cpus: self.cpus[command] = [0]",
"metrics report. fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('\"', '\"\"')) for cmd in self.commands]))",
"total_mem_per_command(self, command): return sum([v for v in self.mems[command]]) def sort_commands(self,",
"the top output # Currently only 1-, 2- or 3-word",
"the end time. Time points with no top records will",
"'%Y-%m-%d-%H:%M:%S' # These commands are excluded when parsing the top",
"go down to the characters of the first word. If",
"= len(reduced_commands) collapsed_len -= 1 if n_commands > max_n_commands: return",
"\"\"\"returns first n letters of a string e.g. first_characters('abc def",
"the last timestamp in the top commands will be used.",
"should_skip_process(self, process): \"\"\"A predicate function to check if the process",
"timestamps, commands, cpus and mems which can be filled through",
"(some can be skipped but there should be no more",
"excluded, as well as docker, awsf3, cwltool, etc. the list",
"timestamp_start: timestamp in the same format (e.g. 01:23:45) \"\"\" return",
"self.processes[timestamp].append(Process(line)) def digest(self, max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills in timestamps, commands, cpus",
"if n_commands > max_n_commands: all_cmd_lengths = [len(cmd.split()[0]) for cmd in",
"will create processes attribute, which is a raw parsed result",
"of commands so that the total number is within max_n_commands.",
"else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add",
"command. command = Top.convert_command_to_collapsed_command(process.command, self.commands) if command not in self.cpus:",
"[-c] over short intervals to monitor the same set of",
"does not contain dates, which can screw up multi-day processes",
"max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len = max_cmd_length -",
"S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm",
"1 # sort commands according to total cpu self.sort_commands(by=sort_by) def",
"are extended back to the original command. \"\"\" all_commands =",
"be converted to minutes since start time. The report starts",
"6 sleeping, 0 stopped, 0 zombie %Cpu(s): 6.6 us, 0.1",
":param timestamp_start: start timestamp in the same format (e.g. 01:20:45)",
"commands # to collapse commands starting with the same words",
"\"haha\") \"\"\" return '\\\"' + string + '\\\"' @staticmethod def",
"the first word. If that's still not sufficient, collapse all",
"incorrect column counts when creating the metrics report. fo.write(delimiter.join([colname_for_timestamps] +",
"minutes are 1-based (shifted by 1). \"\"\" metric_array = getattr(self,",
"root 20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12",
"makes the number of unique commands to be bounded by",
"the top commands will be used. :param timestamp_end: end time",
"this format for timestamp timestamp_format = '%Y-%m-%d-%H:%M:%S' # These commands",
"-Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic",
"will be generated only up to the end time. Time",
"for timestamp in self.processes: all_commands.update(set([pr.command for pr in self.processes[timestamp]])) if",
"\"\"\"convert self.timestamps to a list of minutes since timestamp_start :param",
"used. :param base: default 0. If 0, minutes start with",
"self.timestamps = [] self.commands = [] self.cpus = dict() self.mems",
"are unique to a collapsed prefix are extended back to",
"First decide the number of words from the beginning of",
"reverse order, or alphabetically (by='alphabetical')\"\"\" if by == 'cpu': self.commands",
"n letters of a string e.g. first_characters('abc def ghi', 2)",
"so that the total number is within max_n_commands. First decide",
"same set of processes over time. An example input content",
"that the total number is within max_n_commands. First decide the",
"number of words from the beginning of the commands #",
"be obtained through ``tibanna log -j <job_id> -t`` or through",
"commands that start with the same words, i.e. find the",
"-b -n1 -c -i -w 10000 The default timestamp from",
"max_n_commands: When the number of unique commands exceeds this value,",
"first n letters of a string e.g. first_characters('abc def ghi',",
"that you get the same results if you run it",
"not line or line.isspace(): is_in_table = False if is_in_table: if",
"return cmd else: # collapsed to prefix all_prefixes = [_",
"us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0",
"return 'all_commands' elif cmd in collapsed_commands: # not collapsed return",
"+= 1 # sort commands according to total cpu self.sort_commands(by=sort_by)",
"\"\"\"A predicate function to check if the process should be",
"fo.write('\\n') def should_skip_process(self, process): \"\"\"A predicate function to check if",
"stopped, 0 zombie %Cpu(s): 6.6 us, 0.1 sy, 0.0 ni,",
"the contents by commands, run digest. By default, the max",
"= max_cmd_length - 1 n_commands = len(all_commands) while(n_commands > max_n_commands",
"the same results if you run it twice self.timestamps =",
"timestamps_as_minutes[i] == clock: fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))",
"the same set of processes over time. An example input",
"raw parsed result organized by time stamps. :: top =",
"commands that don't need to be reduced for r_cmd in",
"timestamps as keys and a list of Process class objects",
"timestamp_end: end time in the same timestamp format (e.g. 01:23:45),",
"time. The report starts with minute 0. Time points with",
"[] self.cpus = dict() self.mems = dict() # First fill",
"be skipped (excluded). It returns True if the input process",
"as a generic class, but the class is designed for",
"17919 ubuntu 20 0 40676 3828 3144 R 6.2 0.0",
"= False if is_in_table: if timestamp not in self.processes: self.processes[timestamp]",
"> dt_start: return round((dt - dt_start).seconds / 60) else: return",
"first word max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len =",
"60) def timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps to a list of",
"3) if first_word in self.exclude_list: return True elif first_two_words in",
"with no top records will be filled with 0. If",
"v in self.mems[command]]) def sort_commands(self, by='cpu'): \"\"\"sort self.commands by total",
"timestamp format (e.g. 01:23:45), The reports will be generated only",
"collapsed into prefixes. :: top.digest() To write a csv /",
"only. Kernel threads (single-word commands wrapped in bracket (e.g. [perl])",
"if first_word.startswith('[') and first_word.endswith(']'): return True return False @staticmethod def",
"series of these. The initialization works at any time interval",
"'cpu': self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True) elif by",
"+ 1): clock_shifted = clock + base if i <",
"first_three_words = Top.first_words(process.command, 3) if first_word in self.exclude_list: return True",
"This will create processes attribute, which is a raw parsed",
"by commands, run digest. By default, the max number of",
"n_commands = len(reduced_commands) collapsed_len -= 1 # went down to",
"False if is_in_table: if timestamp not in self.processes: self.processes[timestamp] =",
"collapse all of them into a single command ('all_commands') After",
"top_line): prinfo_as_list = top_line.lstrip().split() self.pid = prinfo_as_list[0] self.user = prinfo_as_list[1]",
"date +%F-%H:%M:%S top -b -n1 [-i] [-c] over short intervals",
"no top records will be filled with 0. If not",
"+= process.cpu self.mems[command][timestamp_ind] += process.mem timestamp_ind += 1 # sort",
"the list to be excluded is in self.exclude_list. It compares",
"# default when timestamps is not available (empty object) timestamps_as_minutes",
"will be collapsed into prefixes. :: top.digest() To write a",
"through method :func: digest. \"\"\" self.processes = dict() self.timestamps =",
"TIME+ COMMAND 712 root 20 0 36.464g 8.223g 19572 S",
"out.hic 17919 ubuntu 20 0 40676 3828 3144 R 6.2",
"elif cmd in collapsed_commands: # not collapsed return cmd else:",
"skip timepoints earlier than timestamp_start for i in range(0, len(timestamps_as_minutes)):",
"VIRT RES SHR S %CPU %MEM TIME+ COMMAND 712 root",
"write_to_csv. :: top.write_to_csv(...) \"\"\" # assume this format for timestamp",
"be sorted by total cpus or total mem (in reverser",
"and if there are more than 16 unique commands, they",
"value. \"\"\" dt = cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) # negative",
"docker, awsf3, cwltool, etc. the list to be excluded is",
"max_n_commands): \"\"\"If the number of commands exceeds max_n_commands, return a",
"minutes since start time. The report starts with minute 0.",
"self.timestamps.append(timestamp) # commands (rows) for process in self.processes[timestamp]: # find",
"for the timepoint column (1st column). default 'timepoints' :param timestamp_start:",
"many commands - start splitting characters then if n_commands >",
"is_in_table = False if is_in_table: if timestamp not in self.processes:",
"It compares either first word or first two or three",
"(in reverser order) (e.g. the first command consumed the most",
"run at about 1-minute intervals, which is performed by awsf3",
"0 for timestamp in sorted(self.processes): # sorted timestamps (columns) self.timestamps.append(timestamp)",
"contents # skip timepoints earlier than timestamp_start for i in",
"0. Time points with no top records will be filled",
"return sum([v for v in self.cpus[command]]) def total_mem_per_command(self, command): return",
"the same format (e.g. 01:23:45) :param timestamp_start: start timestamp in",
"remove elements in the loop uniq_cmds = [cmd for cmd",
"/usr/local/bin/awsf3', '/usr/bin/python3 /usr/local/bin/aws s3', 'java -jar /usr/local/bin/cromwell.jar', 'java -jar /usr/local/bin/cromwell-35.jar']",
"not in self.cpus: self.cpus[command] = [0] * self.nTimepoints self.mems[command] =",
"they will be collapsed into prefixes. :: top.digest() To write",
"timestamp, timestamp_start): \"\"\"timestamp as minutes since timestamp_start. :param timestamp: given",
"top output does not contain dates, which can screw up",
"- dt_start).seconds / 60) else: return -round((dt_start - dt).seconds /",
"self.sort_commands(by=sort_by) def get_collapsed_commands(self, max_n_commands): \"\"\"If the number of commands exceeds",
"unique prefixes. :sort_by: alphabetical|cpu|mem The commands are by default sorted",
"# not collapsed return cmd else: # collapsed to prefix",
"[] process = Process(line) if not self.should_skip_process(process): self.processes[timestamp].append(Process(line)) def digest(self,",
"= dict() self.mems = dict() # First fill in commands",
"for cmd in all_commands if cmd.startswith(r_cmd)] if len(uniq_cmds) == 1:",
"metric + 's') if self.timestamps: if not timestamp_start: timestamp_start =",
"is in self.exclude_list. It compares either first word or first",
"short intervals to monitor the same set of processes over",
"fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add 0",
"# add 0 for timepoints not reported fo.write('\\n') def should_skip_process(self,",
"= set() for cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands =",
"or mem in reverse order, or alphabetically (by='alphabetical')\"\"\" if by",
"20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java",
"= range(0, 5) last_minute = 5 with open(csv_file, 'w') as",
"for r_cmd in list(reduced_commands): # wrap in list so that",
"\"\"\" first_word = Top.first_words(process.command, 1) first_two_words = Top.first_words(process.command, 2) first_three_words",
"start with the same words, i.e. find the maximum number",
"of prefixes of commands so that the total number is",
"def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints', timestamp_start=None, timestamp_end=None, base=0): \"\"\"write",
"\"\"\"write metrics as csv file with commands as columns :param",
"column (1st column). default 'timepoints' :param timestamp_start: start time in",
"be filled with 0. If not specified, the first timestamp",
"continue if not line or line.isspace(): is_in_table = False if",
"still too many commands - start splitting characters then if",
"12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache KiB Swap: 0 total,",
"'/usr/bin/python3 /usr/local/bin/awsf3', '/usr/bin/python3 /usr/local/bin/aws s3', 'java -jar /usr/local/bin/cromwell.jar', 'java -jar",
"metric_array = getattr(self, metric + 's') if self.timestamps: if not",
"on an AWSEM instance through cron jobs. (some can be",
"optionally can be sorted by total cpus or total mem",
"= [len(cmd.split()) for cmd in all_commands] # number of words",
"If that's still not sufficient, collapse all of them into",
"-= 1 if n_commands > max_n_commands: return ['all_commands'] else: #",
"(by='alphabetical')\"\"\" if by == 'cpu': self.commands = sorted(self.commands, key=lambda x:",
"10000 The default timestamp from top output does not contain",
"add 0 for timepoints not reported fo.write('\\n') def should_skip_process(self, process):",
"be the return value. \"\"\" dt = cls.as_datetime(timestamp) dt_start =",
"the first timestamp in the top commands will be used.",
"time interval and can be used as a generic class,",
"1-, 2- or 3-word prefixes work. exclude_list = ['top', 'docker',",
"sorted timestamps (columns) self.timestamps.append(timestamp) # commands (rows) for process in",
"words per command max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len",
"the same words all_cmd_lengths = [len(cmd.split()) for cmd in all_commands]",
"uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)] if",
"commands are by default sorted alphabetically, but optionally can be",
"max_cmd_length - 1 while(n_commands > max_n_commands and collapsed_len > 1):",
"self.commands = sorted(self.commands) @classmethod def as_minutes(cls, timestamp, timestamp_start): \"\"\"timestamp as",
"you run it twice self.timestamps = [] self.commands = []",
"125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d",
"in collapsed_commands: # not collapsed return cmd else: # collapsed",
"return False @staticmethod def convert_command_to_collapsed_command(cmd, collapsed_commands): if collapsed_commands == 'all_commands':",
"%CPU %MEM TIME+ COMMAND 712 root 20 0 36.464g 8.223g",
"in list(reduced_commands): # wrap in list so that we can",
"timestamp = line.split()[1] continue if line.lstrip().startswith('PID'): is_in_table = True continue",
"default 'timepoints' :param timestamp_start: start time in the same timestamp",
"digest. By default, the max number of commands is 16,",
"the number of unique commands to be bounded by max_n_commands.",
"# These commands are excluded when parsing the top output",
"cmd in all_commands if cmd.startswith(r_cmd)] if len(uniq_cmds) == 1: reduced_commands.remove(r_cmd)",
"the process should be skipped (excluded). It returns True if",
"collapse commands starting with the same words all_cmd_lengths = [len(cmd.split())",
"v in self.cpus[command]]) def total_mem_per_command(self, command): return sum([v for v",
"key=lambda x: self.total_mem_per_command(x), reverse=True) elif by == 'alphabetical': self.commands =",
"prefixes of commands so that the total number is within",
"before wrapping it in double quotes. Otherwise we # will",
"top commands :: echo -n 'Timestamp: '; date +%F-%H:%M:%S top",
"= sorted(self.commands) @classmethod def as_minutes(cls, timestamp, timestamp_start): \"\"\"timestamp as minutes",
"elements in the loop uniq_cmds = [cmd for cmd in",
"return [self.as_minutes(t, timestamp_start) for t in self.timestamps] @classmethod def as_datetime(cls,",
"for v in self.mems[command]]) def sort_commands(self, by='cpu'): \"\"\"sort self.commands by",
"def ghi', 2) ==> 'ab' \"\"\" letters = list(string) return",
"0 40676 3828 3144 R 6.2 0.0 0:00.01 top -b",
"to the first words but still too many commands -",
"0, minutes start with 0, if 1, minutes are 1-based",
"sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True) elif by == 'mem': self.commands",
"the most cpu) \"\"\" # Reinitializat these so that you",
"through API ``API().log(job_id=<job_id>, top=True)``. :: Timestamp: 2020-12-18-18:55:37 top - 18:55:37",
"the maximum number of words that makes the number of",
"'/usr/bin/dockerd', 'cron', 'docker-untar', 'containerd', 'goofys-latest', 'cwltool', '/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',",
"timestamp_start): \"\"\"timestamp as minutes since timestamp_start. :param timestamp: given timestamp",
"if not self.should_skip_process(process): self.processes[timestamp].append(Process(line)) def digest(self, max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills in",
"sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0",
"interval and can be used as a generic class, but",
"the output of a regular top commands above run at",
"# find a matching collapsed command (i.e. command prefix) and",
"# header # we have to escape any double quotes",
"max_n_commands. If using only the first word is not sufficient,",
"'/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval', '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim', '/usr/bin/python3 /bin/unattended-upgrade', '/usr/bin/python3",
"commands - start splitting characters then if n_commands > max_n_commands:",
"in self.processes[timestamp]])) if len(all_commands) <= max_n_commands: # no need to",
"not specified, the first timestamp in the top commands will",
"like below, or a series of these. The initialization works",
"be filled with 0. If not specified, the last timestamp",
"2- or 3-word prefixes work. exclude_list = ['top', 'docker', 'dockerd',",
"by max_n_commands. If using only the first word is not",
"above example, 3 will be the return value. \"\"\" dt",
"no more than 1 per minute). This top output can",
"no need to collapse return list(all_commands) # decide the number",
"commands to be bounded by max_n_commands. If using only the",
"of words per command max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths)",
"end time. Time points with no top records will be",
"to the characters of the first word. If that's still",
"characters then if n_commands > max_n_commands: all_cmd_lengths = [len(cmd.split()[0]) for",
"a list of minutes since timestamp_start :param timestamp_start: timestamp in",
"attribute, which is a raw parsed result organized by time",
"added before each top command. To parse top output content,",
"self.cpus = dict() self.mems = dict() self.parse_contents(contents) def parse_contents(self, contents):",
"collapsed_len = max_cmd_length - 1 while(n_commands > max_n_commands and collapsed_len",
"create an object. This will create processes attribute, which is",
"top.digest() To write a csv / tsv file organized by",
"are excluded when parsing the top output # Currently only",
"-d -F out.hic 17919 ubuntu 20 0 40676 3828 3144",
"in self.processes: self.processes[timestamp] = [] process = Process(line) if not",
"about 1-minute intervals, which is performed by awsf3 on an",
"+= 1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands]))",
"fo: # header # we have to escape any double",
"format (e.g. 01:20:45) In the above example, 3 will be",
"with open(csv_file, 'w') as fo: # header # we have",
"NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 712",
"(rows) for process in self.processes[timestamp]: # find a matching collapsed",
"still not sufficient, collapse all of them into a single",
"back to the original command. \"\"\" all_commands = set() for",
"%MEM TIME+ COMMAND 712 root 20 0 36.464g 8.223g 19572",
"csv / tsv file organized by both timestamps (rows) and",
"(e.g. 01:23:45) :param timestamp_start: start timestamp in the same format",
"first_words('abc def ghi', 2) ==> 'abc def' \"\"\" words =",
"1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) #",
"list(string) return ''.join(letters[0:min(n_letters, len(letters))]) def as_dict(self): return self.__dict__ class Process(object):",
"so do each case separately if dt > dt_start: return",
"bracket (e.g. [perl]) are also excluded. \"\"\" first_word = Top.first_words(process.command,",
"over time. An example input content looks like below, or",
"1 n_commands = len(all_commands) while(n_commands > max_n_commands and collapsed_len >",
"of the first word max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths)",
"> max_n_commands: return ['all_commands'] else: # extend reduced commands that",
"then if n_commands > max_n_commands: all_cmd_lengths = [len(cmd.split()[0]) for cmd",
"self.mems = dict() # First fill in commands from commands",
"a collapsed set of commands that consists of prefixes of",
"self.as_minutes(timestamp_end, timestamp_start) else: # default when timestamps is not available",
"wrap_in_double_quotes(string): \"\"\"wrap a given string with double quotes (e.g. haha",
"are not supported by timedelta, so do each case separately",
"case separately if dt > dt_start: return round((dt - dt_start).seconds",
"line.split()[1] continue if line.lstrip().startswith('PID'): is_in_table = True continue if not",
"contents by commands, run digest. By default, the max number",
"\"\"\" self.processes = dict() self.timestamps = [] self.commands = []",
"mem (in reverser order) (e.g. the first command consumed the",
"= [] self.cpus = dict() self.mems = dict() self.parse_contents(contents) def",
"minutes since timestamp_start :param timestamp_start: timestamp in the same format",
"sum([v for v in self.cpus[command]]) def total_mem_per_command(self, command): return sum([v",
"a series of these. The initialization works at any time",
"file organized by both timestamps (rows) and commands (columns), use",
"= [] self.cpus = dict() self.mems = dict() # First",
"command not in self.cpus: self.cpus[command] = [0] * self.nTimepoints self.mems[command]",
"the original command. \"\"\" all_commands = set() for timestamp in",
"is performed by awsf3 on an AWSEM instance through cron",
"return True elif first_two_words in self.exclude_list: return True elif first_three_words",
"max_n_commands. First decide the number of words from the beginning",
"cmd in self.commands])) fo.write('\\n') # contents # skip timepoints earlier",
"that we can remove elements in the loop uniq_cmds =",
"in self.commands])) i += 1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0' for",
"words but still too many commands - start splitting characters",
"> max_n_commands: all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] #",
"= len(all_commands) while(n_commands > max_n_commands and collapsed_len > 1): reduced_commands",
"all_commands if cmd.startswith(r_cmd)] if len(uniq_cmds) == 1: reduced_commands.remove(r_cmd) reduced_commands.add(uniq_cmds[0]) return",
"sorted alphabetically, but optionally can be sorted by total cpus",
"min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 while(n_commands > max_n_commands and",
"for t in self.timestamps] @classmethod def as_datetime(cls, timestamp): return datetime.datetime.strptime(timestamp,",
"first_two_words in self.exclude_list: return True elif first_three_words in self.exclude_list: return",
"records will be filled with 0. If not specified, the",
"can be used as a generic class, but the class",
"-j <job_id> -t`` or through API ``API().log(job_id=<job_id>, top=True)``. :: Timestamp:",
"timestamps (columns) self.timestamps.append(timestamp) # commands (rows) for process in self.processes[timestamp]:",
"max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills in timestamps, commands, cpus and mems attributes",
"Tasks: 7 total, 1 running, 6 sleeping, 0 stopped, 0",
"1 running, 6 sleeping, 0 stopped, 0 zombie %Cpu(s): 6.6",
"/ tsv file organized by both timestamps (rows) and commands",
"= dict() # First fill in commands from commands in",
"timestamp is added before each top command. To parse top",
"commands so that the total number is within max_n_commands. First",
"each top command. To parse top output content, simply create",
"-i -w 10000 The default timestamp from top output does",
"name for the timepoint column (1st column). default 'timepoints' :param",
"@staticmethod def convert_command_to_collapsed_command(cmd, collapsed_commands): if collapsed_commands == 'all_commands': # collapsed",
"base: default 0. If 0, minutes start with 0, if",
"cmd in self.commands])) i += 1 else: fo.write(delimiter.join([str(clock_shifted)] + ['0'",
"number of commands is 16, and if there are more",
"if the process should be skipped (excluded). It returns True",
"dict() self.mems = dict() # First fill in commands from",
"in self.cpus[command]]) def total_mem_per_command(self, command): return sum([v for v in",
"time. Time points with no top records will be filled",
"that's still not sufficient, collapse all of them into a",
"single command ('all_commands') After the collapse, commands that are unique",
"number of characters of the first word max_cmd_length = max(all_cmd_lengths)",
"['all_commands'] else: # extend reduced commands that don't need to",
"for pr in self.processes[timestamp]])) if len(all_commands) <= max_n_commands: # no",
"prinfo_as_list = top_line.lstrip().split() self.pid = prinfo_as_list[0] self.user = prinfo_as_list[1] self.cpu",
"a single command ('all_commands') After the collapse, commands that are",
"The default timestamp from top output does not contain dates,",
"the same format (e.g. 01:23:45) \"\"\" return [self.as_minutes(t, timestamp_start) for",
"ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0",
"top_line.lstrip().split() self.pid = prinfo_as_list[0] self.user = prinfo_as_list[1] self.cpu = float(prinfo_as_list[8])",
"excluded when parsing the top output # Currently only 1-,",
"for line in contents.splitlines(): if line.startswith('Timestamp:'): timestamp = line.split()[1] continue",
"awsf3, cwltool, etc. the list to be excluded is in",
"dict() self.timestamps = [] self.commands = [] self.cpus = dict()",
"cmd in all_commands] # number of words per command max_cmd_length",
"e.g. first_words('abc def ghi', 2) ==> 'abc def' \"\"\" words",
"- start splitting characters then if n_commands > max_n_commands: all_cmd_lengths",
"string with double quotes (e.g. haha -> \"haha\") \"\"\" return",
"= dict() self.mems = dict() self.parse_contents(contents) def parse_contents(self, contents): is_in_table",
"designed for the output of a regular top commands above",
":param timestamp_start: start time in the same timestamp format (e.g.",
"objects as a value. It also creates empty attributes timestamps,",
"continue if line.lstrip().startswith('PID'): is_in_table = True continue if not line",
"line.isspace(): is_in_table = False if is_in_table: if timestamp not in",
"csv file with commands as columns :param metric: 'cpu' or",
"0.0 st KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056",
"timestamps_as_minutes[i] >= 0: break for clock in range(0, last_minute +",
"so that we can remove elements in the loop uniq_cmds",
"def sort_commands(self, by='cpu'): \"\"\"sort self.commands by total cpu (default) or",
"==> 'ab' \"\"\" letters = list(string) return ''.join(letters[0:min(n_letters, len(letters))]) def",
"timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps to a list of minutes since",
"= [cmd for cmd in all_commands if cmd.startswith(r_cmd)] if len(uniq_cmds)",
"too many commands - start splitting characters then if n_commands",
":func: write_to_csv. :: top.write_to_csv(...) \"\"\" # assume this format for",
"1: reduced_commands.remove(r_cmd) reduced_commands.add(uniq_cmds[0]) return reduced_commands def write_to_csv(self, csv_file, metric='cpu', delimiter=',',",
"a csv / tsv file organized by both timestamps (rows)",
"timestamps is not available (empty object) timestamps_as_minutes = range(0, 5)",
"for process in self.processes[timestamp]: # find a matching collapsed command",
"itself is excluded, as well as docker, awsf3, cwltool, etc.",
"can screw up multi-day processes which is common for bioinformatics",
":param timestamp: given timestamp in the same format (e.g. 01:23:45)",
"the top commands will be used. :param base: default 0.",
"by awsf3 on an AWSEM instance through cron jobs. (some",
"cpus or total mem (in reverser order) (e.g. the first",
"most cpu) \"\"\" # Reinitializat these so that you get",
"len(uniq_cmds) == 1: reduced_commands.remove(r_cmd) reduced_commands.add(uniq_cmds[0]) return reduced_commands def write_to_csv(self, csv_file,",
"collapsed_len)) n_commands = len(reduced_commands) collapsed_len -= 1 if n_commands >",
"len(reduced_commands) collapsed_len -= 1 # went down to the first",
"get_collapsed_commands(self, max_n_commands): \"\"\"If the number of commands exceeds max_n_commands, return",
"from commands in processes (and collapse if needed.) self.commands =",
"timestamp_start: start timestamp in the same format (e.g. 01:20:45) In",
"min_cmd_length = min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 n_commands =",
"commands :: echo -n 'Timestamp: '; date +%F-%H:%M:%S top -b",
"column counts when creating the metrics report. fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('\"',",
"= prinfo_as_list[0] self.user = prinfo_as_list[1] self.cpu = float(prinfo_as_list[8]) self.mem =",
"that don't need to be reduced for r_cmd in list(reduced_commands):",
"a list of Process class objects as a value. It",
"+ [str(metric_array[cmd][i]) for cmd in self.commands])) i += 1 else:",
"alphabetically (by='alphabetical')\"\"\" if by == 'cpu': self.commands = sorted(self.commands, key=lambda",
"\"\"\"timestamp as minutes since timestamp_start. :param timestamp: given timestamp in",
"commands is 16, and if there are more than 16",
"class Top(object): \"\"\"class TopSeries stores the information of a series",
"creates empty attributes timestamps, commands, cpus and mems which can",
"can be sorted by total cpus or total mem (in",
"collapsed to prefix all_prefixes = [_ for _ in collapsed_commands",
"cpus and mems from processes, matching collapsed commands. self.nTimepoints =",
"for cmd in self.commands])) i += 1 else: fo.write(delimiter.join([str(clock_shifted)] +",
"5) last_minute = 5 with open(csv_file, 'w') as fo: #",
"if the input process should be skipped. e.g. the top",
"1, minutes are 1-based (shifted by 1). \"\"\" metric_array =",
"intervals, which is performed by awsf3 on an AWSEM instance",
"= [] process = Process(line) if not self.should_skip_process(process): self.processes[timestamp].append(Process(line)) def",
"addNorm -w 1000 -d -F out.hic 17919 ubuntu 20 0",
"processes (and collapse if needed.) self.commands = self.get_collapsed_commands(max_n_commands) # Fill",
"self.exclude_list: return True elif first_three_words in self.exclude_list: return True if",
"have to escape any double quotes that are present in",
"collapsed_len -= 1 if n_commands > max_n_commands: return ['all_commands'] else:",
"\"\"\"wrap a given string with double quotes (e.g. haha ->",
"PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND",
"don't need to be reduced for r_cmd in list(reduced_commands): #",
"processes, matching collapsed commands. self.nTimepoints = len(self.processes) timestamp_ind = 0",
"= cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) # negative numbers are not",
"+ ['0' for cmd in self.commands])) # add 0 for",
"command. To parse top output content, simply create an object.",
"for cmd in all_commands] # number of characters of the",
"(e.g. 01:20:45) In the above example, 3 will be the",
"as_dict(self): return self.__dict__ class Process(object): def __init__(self, top_line): prinfo_as_list =",
"if len(uniq_cmds) == 1: reduced_commands.remove(r_cmd) reduced_commands.add(uniq_cmds[0]) return reduced_commands def write_to_csv(self,",
"-jar juicer_tools.jar addNorm -w 1000 -d -F out.hic 17919 ubuntu",
"of the commands to collapse commands that start with the",
"string e.g. first_characters('abc def ghi', 2) ==> 'ab' \"\"\" letters",
"log -j <job_id> -t`` or through API ``API().log(job_id=<job_id>, top=True)``. ::",
"timestamp format (e.g. 01:23:45), time stamps will be converted to",
"getattr(self, metric + 's') if self.timestamps: if not timestamp_start: timestamp_start",
"def should_skip_process(self, process): \"\"\"A predicate function to check if the",
"contents): is_in_table = False for line in contents.splitlines(): if line.startswith('Timestamp:'):",
"@staticmethod def first_words(string, n_words): \"\"\"returns first n words of a",
"01:23:45), time stamps will be converted to minutes since start",
"command (i.e. command prefix) and use that as command. command",
"first command consumed the most cpu) \"\"\" # Reinitializat these",
"so that you get the same results if you run",
"= set() for cmd in all_commands: reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len)) n_commands =",
"'timepoints' :param timestamp_start: start time in the same timestamp format",
"return self.__dict__ class Process(object): def __init__(self, top_line): prinfo_as_list = top_line.lstrip().split()",
"pipelines. So, an extra timestamp is added before each top",
"consumed the most cpu) \"\"\" # Reinitializat these so that",
"original command. \"\"\" all_commands = set() for timestamp in self.processes:",
"object) timestamps_as_minutes = range(0, 5) last_minute = 5 with open(csv_file,",
"def as_datetime(cls, timestamp): return datetime.datetime.strptime(timestamp, cls.timestamp_format) @staticmethod def wrap_in_double_quotes(string): \"\"\"wrap",
"that are present in the cmd, before wrapping it in",
":: Timestamp: 2020-12-18-18:55:37 top - 18:55:37 up 4 days, 2:37,",
"reduced_commands.add(uniq_cmds[0]) return reduced_commands def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints', timestamp_start=None,",
"# assume this format for timestamp timestamp_format = '%Y-%m-%d-%H:%M:%S' #",
"attribute. :param max_n_commands: When the number of unique commands exceeds",
"output content and creates processes which is a dictionary with",
"True return False @staticmethod def convert_command_to_collapsed_command(cmd, collapsed_commands): if collapsed_commands ==",
"is_in_table = True continue if not line or line.isspace(): is_in_table",
"awsf3 on an AWSEM instance through cron jobs. (some can",
"0. If not specified, the first timestamp in the top",
"for i in range(0, len(timestamps_as_minutes)): if timestamps_as_minutes[i] >= 0: break",
"= cls.as_datetime(timestamp_start) # negative numbers are not supported by timedelta,",
"should be no more than 1 per minute). This top",
"cls.timestamp_format) @staticmethod def wrap_in_double_quotes(string): \"\"\"wrap a given string with double",
"is a raw parsed result organized by time stamps. ::",
"cpus and mems which can be filled through method :func:",
"processes attribute, which is a raw parsed result organized by",
"timestamp in the top commands will be used. :param base:",
"def digest(self, max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills in timestamps, commands, cpus and",
"6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000",
"command prefix) and use that as command. command = Top.convert_command_to_collapsed_command(process.command,",
"return ['all_commands'] else: # extend reduced commands that don't need",
"write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints', timestamp_start=None, timestamp_end=None, base=0): \"\"\"write metrics",
"('all_commands') After the collapse, commands that are unique to a",
"we have to escape any double quotes that are present",
"cwltool, etc. the list to be excluded is in self.exclude_list.",
"1 # went down to the first words but still",
"collapsed_commands if cmd.startswith(_)] longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]",
"if timestamp not in self.processes: self.processes[timestamp] = [] process =",
"01:23:45) :param timestamp_start: start timestamp in the same format (e.g.",
"[str(metric_array[cmd][i]) for cmd in self.commands])) i += 1 else: fo.write(delimiter.join([str(clock_shifted)]",
"is designed for the output of a regular top commands",
":param delimiter: default ',' :param colname_for_timestamps: colunm name for the",
"# extend reduced commands that don't need to be reduced",
"By default, the max number of commands is 16, and",
"max_n_commands and collapsed_len > 1): reduced_commands = set() for cmd",
"commands wrapped in bracket (e.g. [perl]) are also excluded. \"\"\"",
"16, and if there are more than 16 unique commands,",
"# sort commands according to total cpu self.sort_commands(by=sort_by) def get_collapsed_commands(self,",
"'all_commands' elif cmd in collapsed_commands: # not collapsed return cmd",
"= Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command, 3) if first_word in",
"range(0, len(timestamps_as_minutes)): if timestamps_as_minutes[i] >= 0: break for clock in",
"0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si,",
"# no need to collapse return list(all_commands) # decide the",
"= string.split() return ' '.join(words[0:min(n_words, len(words))]) @staticmethod def first_characters(string, n_letters):",
"bioinformatics pipelines. So, an extra timestamp is added before each",
"a series of top commands :: echo -n 'Timestamp: ';",
"Kernel threads (single-word commands wrapped in bracket (e.g. [perl]) are",
"Process(line) if not self.should_skip_process(process): self.processes[timestamp].append(Process(line)) def digest(self, max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills",
"cmd in all_commands] # number of characters of the first",
"format (e.g. 01:23:45), time stamps will be converted to minutes",
"-round((dt_start - dt).seconds / 60) def timestamps_as_minutes(self, timestamp_start): \"\"\"convert self.timestamps",
"'cwltool', '/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval', '/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim', '/usr/bin/python3 /bin/unattended-upgrade',",
"= self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute = self.as_minutes(timestamp_end, timestamp_start) else:",
":sort_by: alphabetical|cpu|mem The commands are by default sorted alphabetically, but",
"jobs. (some can be skipped but there should be no",
"= sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0] return longest_prefix def total_cpu_per_command(self,",
"0, if 1, minutes are 1-based (shifted by 1). \"\"\"",
"be no more than 1 per minute). This top output",
"output can be obtained through ``tibanna log -j <job_id> -t``",
"parse_contents(self, contents): is_in_table = False for line in contents.splitlines(): if",
"cmd in all_commands: reduced_commands.add(Top.first_words(cmd, collapsed_len)) n_commands = len(reduced_commands) collapsed_len -=",
"get the same results if you run it twice self.timestamps",
"len(all_commands) <= max_n_commands: # no need to collapse return list(all_commands)",
"for cmd in self.commands])) # add 0 for timepoints not",
":func: digest. \"\"\" self.processes = dict() self.timestamps = [] self.commands",
"digest. \"\"\" self.processes = dict() self.timestamps = [] self.commands =",
"be bounded by max_n_commands. If using only the first word",
"in the same format (e.g. 01:20:45) In the above example,",
"S %CPU %MEM TIME+ COMMAND 712 root 20 0 36.464g",
"- 1 n_commands = len(all_commands) while(n_commands > max_n_commands and collapsed_len",
"not reported fo.write('\\n') def should_skip_process(self, process): \"\"\"A predicate function to",
"commands, they will be collapsed into prefixes. :: top.digest() To",
"-b -n1 [-i] [-c] over short intervals to monitor the",
"self.nTimepoints self.cpus[command][timestamp_ind] += process.cpu self.mems[command][timestamp_ind] += process.mem timestamp_ind += 1",
"max_cmd_length - 1 n_commands = len(all_commands) while(n_commands > max_n_commands and",
"the commands # to collapse commands starting with the same",
"When the number of unique commands exceeds this value, they",
"commands exceeds max_n_commands, return a collapsed set of commands that",
"need to collapse return list(all_commands) # decide the number of",
"'cpu' or 'mem' :param delimiter: default ',' :param colname_for_timestamps: colunm",
"it twice self.timestamps = [] self.commands = [] self.cpus =",
"'/usr/bin/python3 /bin/unattended-upgrade', '/usr/bin/python3 /usr/local/bin/awsf3', '/usr/bin/python3 /usr/local/bin/aws s3', 'java -jar /usr/local/bin/cromwell.jar',",
"'all_commands': # collapsed to one command return 'all_commands' elif cmd",
"self.exclude_list: return True elif first_two_words in self.exclude_list: return True elif",
"# Currently only 1-, 2- or 3-word prefixes work. exclude_list",
"string e.g. first_words('abc def ghi', 2) ==> 'abc def' \"\"\"",
"in sorted(self.processes): # sorted timestamps (columns) self.timestamps.append(timestamp) # commands (rows)",
"for bioinformatics pipelines. So, an extra timestamp is added before",
"1) first_two_words = Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command, 3) if",
"reverse=True) elif by == 'alphabetical': self.commands = sorted(self.commands) @classmethod def",
"3-word prefixes work. exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',",
"0. If not specified, the last timestamp in the top",
"# contents # skip timepoints earlier than timestamp_start for i",
"digest(self, max_n_commands=16, sort_by='alphabetical'): \"\"\"Fills in timestamps, commands, cpus and mems",
"for cmd in all_commands: reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len)) n_commands = len(reduced_commands) collapsed_len",
"is added before each top command. To parse top output",
"timestamp in self.processes: all_commands.update(set([pr.command for pr in self.processes[timestamp]])) if len(all_commands)",
"\"\"\"class TopSeries stores the information of a series of top",
"self.commands = self.get_collapsed_commands(max_n_commands) # Fill in timestamps, cpus and mems",
"be excluded is in self.exclude_list. It compares either first word",
">= 0: break for clock in range(0, last_minute + 1):",
"0 users, load average: 5.59, 5.28, 5.76 Tasks: 7 total,",
"first_three_words in self.exclude_list: return True if first_word.startswith('[') and first_word.endswith(']'): return",
"wrapping it in double quotes. Otherwise we # will get",
"dictionary with timestamps as keys and a list of Process",
"or a series of these. The initialization works at any",
"for timestamp timestamp_format = '%Y-%m-%d-%H:%M:%S' # These commands are excluded",
"commands that are unique to a collapsed prefix are extended",
"commands will be used. :param timestamp_end: end time in the",
"# went down to the first words but still too",
"set of processes over time. An example input content looks",
"be skipped. e.g. the top command itself is excluded, as",
"PID USER PR NI VIRT RES SHR S %CPU %MEM",
"of these. The initialization works at any time interval and",
"is a dictionary with timestamps as keys and a list",
"commands, cpus and mems which can be filled through method",
"top=True)``. :: Timestamp: 2020-12-18-18:55:37 top - 18:55:37 up 4 days,",
"x: len(x), reverse=True)[0] return longest_prefix def total_cpu_per_command(self, command): return sum([v",
"than 1 per minute). This top output can be obtained",
"of the commands # to collapse commands starting with the",
"command ('all_commands') After the collapse, commands that are unique to",
"<= max_n_commands: # no need to collapse return list(all_commands) #",
"sorted by total cpus or total mem (in reverser order)",
"a given string with double quotes (e.g. haha -> \"haha\")",
"end time in the same timestamp format (e.g. 01:23:45), The",
"the first word max_cmd_length = max(all_cmd_lengths) min_cmd_length = min(all_cmd_lengths) collapsed_len",
"= self.as_minutes(timestamp_end, timestamp_start) else: # default when timestamps is not",
"== 1: reduced_commands.remove(r_cmd) reduced_commands.add(uniq_cmds[0]) return reduced_commands def write_to_csv(self, csv_file, metric='cpu',",
"-jar /usr/local/bin/cromwell.jar', 'java -jar /usr/local/bin/cromwell-35.jar'] def __init__(self, contents): \"\"\"initialization parsed",
"``API().log(job_id=<job_id>, top=True)``. :: Timestamp: 2020-12-18-18:55:37 top - 18:55:37 up 4",
"which can be filled through method :func: digest. \"\"\" self.processes",
"self.cpus: self.cpus[command] = [0] * self.nTimepoints self.mems[command] = [0] *",
"the timepoint column (1st column). default 'timepoints' :param timestamp_start: start",
"down to the characters of the first word. If that's",
"top - 18:55:37 up 4 days, 2:37, 0 users, load",
"(e.g. haha -> \"haha\") \"\"\" return '\\\"' + string +",
"4 days, 2:37, 0 users, load average: 5.59, 5.28, 5.76",
"the number of words from the beginning of the commands",
"break for clock in range(0, last_minute + 1): clock_shifted =",
"'java -jar /usr/local/bin/cromwell-35.jar'] def __init__(self, contents): \"\"\"initialization parsed top output",
"the input process should be skipped. e.g. the top command",
"or alphabetically (by='alphabetical')\"\"\" if by == 'cpu': self.commands = sorted(self.commands,",
"same words all_cmd_lengths = [len(cmd.split()) for cmd in all_commands] #",
"but still too many commands - start splitting characters then",
"(1st column). default 'timepoints' :param timestamp_start: start time in the",
"18:55:37 up 4 days, 2:37, 0 users, load average: 5.59,",
"process.mem timestamp_ind += 1 # sort commands according to total",
"if len(all_commands) <= max_n_commands: # no need to collapse return",
"output # Currently only 1-, 2- or 3-word prefixes work.",
"'\\\"' + string + '\\\"' @staticmethod def first_words(string, n_words): \"\"\"returns",
"pr in self.processes[timestamp]])) if len(all_commands) <= max_n_commands: # no need",
"not collapsed return cmd else: # collapsed to prefix all_prefixes",
"in range(0, len(timestamps_as_minutes)): if timestamps_as_minutes[i] >= 0: break for clock",
"= Top.first_words(process.command, 1) first_two_words = Top.first_words(process.command, 2) first_three_words = Top.first_words(process.command,",
"of characters of the first word max_cmd_length = max(all_cmd_lengths) min_cmd_length",
"\"\"\"Fills in timestamps, commands, cpus and mems attributes from processes",
"default 0. If 0, minutes start with 0, if 1,",
"total, 1 running, 6 sleeping, 0 stopped, 0 zombie %Cpu(s):",
"2:37, 0 users, load average: 5.59, 5.28, 5.76 Tasks: 7",
"the cmd, before wrapping it in double quotes. Otherwise we",
":param timestamp_end: end time in the same timestamp format (e.g.",
"in bracket (e.g. [perl]) are also excluded. \"\"\" first_word =",
"Timestamp: 2020-12-18-18:55:37 top - 18:55:37 up 4 days, 2:37, 0",
"0 for timepoints not reported fo.write('\\n') def should_skip_process(self, process): \"\"\"A",
"min(all_cmd_lengths) collapsed_len = max_cmd_length - 1 n_commands = len(all_commands) while(n_commands",
"can be filled through method :func: digest. \"\"\" self.processes =",
"intervals to monitor the same set of processes over time.",
"top commands will be used. :param timestamp_end: end time in",
"range(0, 5) last_minute = 5 with open(csv_file, 'w') as fo:",
"# First fill in commands from commands in processes (and",
"words = string.split() return ' '.join(words[0:min(n_words, len(words))]) @staticmethod def first_characters(string,",
"is_in_table: if timestamp not in self.processes: self.processes[timestamp] = [] process",
"value, they are collapsed into unique prefixes. :sort_by: alphabetical|cpu|mem The",
"of unique commands exceeds this value, they are collapsed into",
"timestamp_start: timestamp_start = self.timestamps[0] if not timestamp_end: timestamp_end = self.timestamps[-1]",
"first_characters(string, n_letters): \"\"\"returns first n letters of a string e.g.",
"looks like below, or a series of these. The initialization",
"prinfo_as_list[0] self.user = prinfo_as_list[1] self.cpu = float(prinfo_as_list[8]) self.mem = float(prinfo_as_list[9])",
"the same timestamp format (e.g. 01:23:45), time stamps will be",
"a string e.g. first_words('abc def ghi', 2) ==> 'abc def'",
"self.nTimepoints self.mems[command] = [0] * self.nTimepoints self.cpus[command][timestamp_ind] += process.cpu self.mems[command][timestamp_ind]",
"mem in reverse order, or alphabetically (by='alphabetical')\"\"\" if by ==",
"which is performed by awsf3 on an AWSEM instance through",
"unique commands exceeds this value, they are collapsed into unique",
"self.cpus = dict() self.mems = dict() # First fill in",
"works at any time interval and can be used as",
"collapsed commands. self.nTimepoints = len(self.processes) timestamp_ind = 0 for timestamp",
"> 1): reduced_commands = set() for cmd in all_commands: reduced_commands.add(Top.first_characters(cmd.split()[0],",
"returns True if the input process should be skipped. e.g.",
":: echo -n 'Timestamp: '; date +%F-%H:%M:%S top -b -n1",
"first n words of a string e.g. first_words('abc def ghi',",
"open(csv_file, 'w') as fo: # header # we have to",
"line or line.isspace(): is_in_table = False if is_in_table: if timestamp",
"double quotes (e.g. haha -> \"haha\") \"\"\" return '\\\"' +",
"find the maximum number of words that makes the number",
"total mem (in reverser order) (e.g. the first command consumed",
"= clock + base if i < len(timestamps_as_minutes) and timestamps_as_minutes[i]",
"process): \"\"\"A predicate function to check if the process should",
"top command itself is excluded, as well as docker, awsf3,",
"timestamp not in self.processes: self.processes[timestamp] = [] process = Process(line)",
"while(n_commands > max_n_commands and collapsed_len > 1): reduced_commands = set()",
"are also excluded. \"\"\" first_word = Top.first_words(process.command, 1) first_two_words =",
"timestamp_end: timestamp_end = self.timestamps[-1] timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start) last_minute = self.as_minutes(timestamp_end,",
"(i.e. command prefix) and use that as command. command =",
"the characters of the first word. If that's still not",
"should be skipped. e.g. the top command itself is excluded,",
"as keys and a list of Process class objects as",
"= '%Y-%m-%d-%H:%M:%S' # These commands are excluded when parsing the",
"class is designed for the output of a regular top",
"in self.exclude_list: return True elif first_two_words in self.exclude_list: return True",
"\"\"\" dt = cls.as_datetime(timestamp) dt_start = cls.as_datetime(timestamp_start) # negative numbers",
"1-minute intervals, which is performed by awsf3 on an AWSEM",
"self.user = prinfo_as_list[1] self.cpu = float(prinfo_as_list[8]) self.mem = float(prinfo_as_list[9]) self.command",
"at any time interval and can be used as a",
"column). default 'timepoints' :param timestamp_start: start time in the same",
"['0' for cmd in self.commands])) # add 0 for timepoints",
"in self.exclude_list. It compares either first word or first two",
"sorted(self.commands) @classmethod def as_minutes(cls, timestamp, timestamp_start): \"\"\"timestamp as minutes since",
"list of minutes since timestamp_start :param timestamp_start: timestamp in the",
"(e.g. 01:23:45), time stamps will be converted to minutes since",
"are by default sorted alphabetically, but optionally can be sorted",
"processes which is common for bioinformatics pipelines. So, an extra"
] |
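The row above is a bag of overlapping word-level shingles from tibanna's awsf3 `Top`/`Process` classes. The piece of logic the fragments spell out most completely is the command-collapsing step: find the largest word-prefix length that keeps the number of unique commands within `max_n_commands`, then extend any prefix that matched only one command back to that full command. Below is a minimal, self-contained sketch of just that step, reassembled from the quoted fragments; the standalone `collapse_commands` wrapper and the sample commands are hypothetical, and in the source this logic lives in `Top.get_collapsed_commands`, which additionally falls back to first-word characters and finally to a single 'all_commands' bucket ::

    def first_words(string, n_words):
        """returns first n words of a string
        e.g. first_words('abc def ghi', 2) ==> 'abc def'
        """
        words = string.split()
        return ' '.join(words[0:min(n_words, len(words))])

    def collapse_commands(all_commands, max_n_commands=16):
        # hypothetical standalone wrapper around the prefix-collapse step
        if len(all_commands) <= max_n_commands:
            return set(all_commands)  # no need to collapse
        # try ever-shorter word prefixes until few enough unique commands remain
        collapsed_len = max(len(cmd.split()) for cmd in all_commands) - 1
        n_commands = len(all_commands)
        reduced_commands = set(all_commands)
        while n_commands > max_n_commands and collapsed_len > 1:
            reduced_commands = {first_words(cmd, collapsed_len) for cmd in all_commands}
            n_commands = len(reduced_commands)
            collapsed_len -= 1
        # extend reduced commands that don't need to be reduced
        for r_cmd in list(reduced_commands):  # wrap in list so we can mutate inside the loop
            uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
            if len(uniq_cmds) == 1:
                reduced_commands.remove(r_cmd)
                reduced_commands.add(uniq_cmds[0])
        return reduced_commands

    # hypothetical sample: the two java launches collapse to their shared prefix
    print(collapse_commands({'java -jar a.jar', 'java -jar b.jar', 'python x.py'}, 2))
    # => {'java -jar', 'python x.py'}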
[
"coding: utf8 -*- # Copyright (C) PyZMQ Developers # Distributed",
"msg.routing_id is not None server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client) assert",
"None server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client) assert reply == b'reply'",
"zmq from zmq.tests import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self):",
"str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try: recvd = dish.recv() except zmq.Again:",
"i in range(10): msg = str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try:",
"0 received = set() sent = set() for i in",
"1 # assert that we got *something* assert len(received.intersection(sent)) >=",
"self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250 group = 'mygroup' dish.join(group) received_count",
"BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self): if not zmq.DRAFT_API: raise",
"== b'reply' def test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo",
"= str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try: recvd = dish.recv() except",
"for i in range(10): msg = str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group)",
"= set() for i in range(10): msg = str(i).encode('ascii') sent.add(msg)",
"msg = str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try: recvd = dish.recv()",
"received = set() sent = set() for i in range(10):",
"Distributed under the terms of the Modified BSD License. import",
"server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg = self.recv(server, copy=False) assert",
"PyZMQ Developers # Distributed under the terms of the Modified",
"test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250 group",
"client.send(b'request') msg = self.recv(server, copy=False) assert msg.routing_id is not None",
"# Copyright (C) PyZMQ Developers # Distributed under the terms",
"Developers # Distributed under the terms of the Modified BSD",
"zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count += 1 # assert that",
"# Distributed under the terms of the Modified BSD License.",
"set() for i in range(10): msg = str(i).encode('ascii') sent.add(msg) radio.send(msg,",
"(C) PyZMQ Developers # Distributed under the terms of the",
"250 group = 'mygroup' dish.join(group) received_count = 0 received =",
"not zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self):",
"self).setUp() def test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg",
"pytest import zmq from zmq.tests import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase):",
"radio.send(msg, group=group) try: recvd = dish.recv() except zmq.Again: time.sleep(0.1) else:",
"zmq.SERVER) client.send(b'request') msg = self.recv(server, copy=False) assert msg.routing_id is not",
"Copyright (C) PyZMQ Developers # Distributed under the terms of",
"zmq.tests import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self): if not",
"assert reply == b'reply' def test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH,",
"try: recvd = dish.recv() except zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count",
"unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)",
"copy=False) assert msg.routing_id is not None server.send(b'reply', routing_id=msg.routing_id) reply =",
"= 'mygroup' dish.join(group) received_count = 0 received = set() sent",
"received.add(recvd) received_count += 1 # assert that we got *something*",
"self.recv(client) assert reply == b'reply' def test_radio_dish(self): dish, radio =",
"dish.recv() except zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count += 1 #",
"the terms of the Modified BSD License. import os import",
"+= 1 # assert that we got *something* assert len(received.intersection(sent))",
"assert msg.routing_id is not None server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client)",
"'mygroup' dish.join(group) received_count = 0 received = set() sent =",
"License. import os import platform import time import pytest import",
"-*- # Copyright (C) PyZMQ Developers # Distributed under the",
"class TestDraftSockets(BaseZMQTestCase): def setUp(self): if not zmq.DRAFT_API: raise pytest.skip(\"draft api",
"reply == b'reply' def test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)",
"-*- coding: utf8 -*- # Copyright (C) PyZMQ Developers #",
"received_count += 1 # assert that we got *something* assert",
"= self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250 group = 'mygroup' dish.join(group)",
"server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client) assert reply == b'reply' def",
"import pytest import zmq from zmq.tests import BaseZMQTestCase, skip_pypy class",
"utf8 -*- # Copyright (C) PyZMQ Developers # Distributed under",
"platform import time import pytest import zmq from zmq.tests import",
"in range(10): msg = str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try: recvd",
"routing_id=msg.routing_id) reply = self.recv(client) assert reply == b'reply' def test_radio_dish(self):",
"dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250 group =",
"= self.recv(client) assert reply == b'reply' def test_radio_dish(self): dish, radio",
"terms of the Modified BSD License. import os import platform",
"import os import platform import time import pytest import zmq",
"msg = self.recv(server, copy=False) assert msg.routing_id is not None server.send(b'reply',",
"except zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count += 1 # assert",
"Modified BSD License. import os import platform import time import",
"if not zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp() def",
"= self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg = self.recv(server, copy=False) assert msg.routing_id",
"not None server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client) assert reply ==",
"test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg = self.recv(server,",
"pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self): client, server =",
"zmq.RADIO) dish.rcvtimeo = 250 group = 'mygroup' dish.join(group) received_count =",
"dish.join(group) received_count = 0 received = set() sent = set()",
"received_count = 0 received = set() sent = set() for",
"# assert that we got *something* assert len(received.intersection(sent)) >= 5",
"import zmq from zmq.tests import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def",
"is not None server.send(b'reply', routing_id=msg.routing_id) reply = self.recv(client) assert reply",
"from zmq.tests import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self): if",
"client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg = self.recv(server, copy=False)",
"set() sent = set() for i in range(10): msg =",
"TestDraftSockets(BaseZMQTestCase): def setUp(self): if not zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\")",
"# -*- coding: utf8 -*- # Copyright (C) PyZMQ Developers",
"BSD License. import os import platform import time import pytest",
"= dish.recv() except zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count += 1",
"of the Modified BSD License. import os import platform import",
"api unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT,",
"= self.recv(server, copy=False) assert msg.routing_id is not None server.send(b'reply', routing_id=msg.routing_id)",
"the Modified BSD License. import os import platform import time",
"def test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250",
"zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self): client,",
"= 0 received = set() sent = set() for i",
"super(TestDraftSockets, self).setUp() def test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request')",
"skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self): if not zmq.DRAFT_API: raise pytest.skip(\"draft",
"time import pytest import zmq from zmq.tests import BaseZMQTestCase, skip_pypy",
"os import platform import time import pytest import zmq from",
"def setUp(self): if not zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets,",
"dish.rcvtimeo = 250 group = 'mygroup' dish.join(group) received_count = 0",
"range(10): msg = str(i).encode('ascii') sent.add(msg) radio.send(msg, group=group) try: recvd =",
"raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp() def test_client_server(self): client, server",
"radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo = 250 group = 'mygroup'",
"b'reply' def test_radio_dish(self): dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO) dish.rcvtimeo =",
"group = 'mygroup' dish.join(group) received_count = 0 received = set()",
"time.sleep(0.1) else: received.add(recvd) received_count += 1 # assert that we",
"sent = set() for i in range(10): msg = str(i).encode('ascii')",
"group=group) try: recvd = dish.recv() except zmq.Again: time.sleep(0.1) else: received.add(recvd)",
"= 250 group = 'mygroup' dish.join(group) received_count = 0 received",
"self.recv(server, copy=False) assert msg.routing_id is not None server.send(b'reply', routing_id=msg.routing_id) reply",
"self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg = self.recv(server, copy=False) assert msg.routing_id is",
"under the terms of the Modified BSD License. import os",
"import platform import time import pytest import zmq from zmq.tests",
"import time import pytest import zmq from zmq.tests import BaseZMQTestCase,",
"def test_client_server(self): client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER) client.send(b'request') msg =",
"sent.add(msg) radio.send(msg, group=group) try: recvd = dish.recv() except zmq.Again: time.sleep(0.1)",
"= set() sent = set() for i in range(10): msg",
"reply = self.recv(client) assert reply == b'reply' def test_radio_dish(self): dish,",
"setUp(self): if not zmq.DRAFT_API: raise pytest.skip(\"draft api unavailable\") super(TestDraftSockets, self).setUp()",
"import BaseZMQTestCase, skip_pypy class TestDraftSockets(BaseZMQTestCase): def setUp(self): if not zmq.DRAFT_API:",
"recvd = dish.recv() except zmq.Again: time.sleep(0.1) else: received.add(recvd) received_count +=",
"else: received.add(recvd) received_count += 1 # assert that we got"
] |
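Pieced together by their word overlaps, the shingles in this row reconstruct a short pyzmq test module for the DRAFT socket types (CLIENT/SERVER and RADIO/DISH) essentially in full. The reading below is inferred from the overlaps; only indentation and blank lines are editorial, and the unused `os`/`platform`/`skip_pypy` imports plus the unusual `raise pytest.skip(...)` idiom are kept verbatim ::

    # -*- coding: utf8 -*-
    # Copyright (C) PyZMQ Developers
    # Distributed under the terms of the Modified BSD License.

    import os
    import platform
    import time

    import pytest

    import zmq
    from zmq.tests import BaseZMQTestCase, skip_pypy


    class TestDraftSockets(BaseZMQTestCase):
        def setUp(self):
            if not zmq.DRAFT_API:
                raise pytest.skip("draft api unavailable")
            super(TestDraftSockets, self).setUp()

        def test_client_server(self):
            client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
            client.send(b'request')
            msg = self.recv(server, copy=False)
            assert msg.routing_id is not None
            server.send(b'reply', routing_id=msg.routing_id)
            reply = self.recv(client)
            assert reply == b'reply'

        def test_radio_dish(self):
            dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
            dish.rcvtimeo = 250
            group = 'mygroup'
            dish.join(group)
            received_count = 0
            received = set()
            sent = set()
            for i in range(10):
                msg = str(i).encode('ascii')
                sent.add(msg)
                radio.send(msg, group=group)
                try:
                    recvd = dish.recv()
                except zmq.Again:
                    time.sleep(0.1)
                else:
                    received.add(recvd)
                    received_count += 1
            # assert that we got *something*
            assert len(received.intersection(sent)) >= 5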
[
"failure. \"\"\" def __init__(self, error_code, error, request): self.error_code = error_code",
"message indicating failure. \"\"\" def __init__(self, error_code, error, request): self.error_code",
"\"\"\" def __init__(self, error_code, error, request): self.error_code = error_code self.error",
"error self.request = request Exception.__init__(self, error) def __str__(self): return \"APIError:",
"__init__(self, error_code, error, request): self.error_code = error_code self.error = error",
"raise APIError if receiving json message indicating failure. \"\"\" def",
"self.request = request Exception.__init__(self, error) def __str__(self): return \"APIError: %s:",
"indicating failure. \"\"\" def __init__(self, error_code, error, request): self.error_code =",
"error, request): self.error_code = error_code self.error = error self.request =",
"def __init__(self, error_code, error, request): self.error_code = error_code self.error =",
"if receiving json message indicating failure. \"\"\" def __init__(self, error_code,",
"json message indicating failure. \"\"\" def __init__(self, error_code, error, request):",
"\"APIError: %s: %s, request: %s\" % ( self.error_code, self.error, self.request,",
"request Exception.__init__(self, error) def __str__(self): return \"APIError: %s: %s, request:",
"error_code self.error = error self.request = request Exception.__init__(self, error) def",
"request): self.error_code = error_code self.error = error self.request = request",
"Exception.__init__(self, error) def __str__(self): return \"APIError: %s: %s, request: %s\"",
"\"\"\" raise APIError if receiving json message indicating failure. \"\"\"",
"self.error = error self.request = request Exception.__init__(self, error) def __str__(self):",
"__str__(self): return \"APIError: %s: %s, request: %s\" % ( self.error_code,",
"return \"APIError: %s: %s, request: %s\" % ( self.error_code, self.error,",
"error_code, error, request): self.error_code = error_code self.error = error self.request",
"APIError(Exception): \"\"\" raise APIError if receiving json message indicating failure.",
"APIError if receiving json message indicating failure. \"\"\" def __init__(self,",
"= error_code self.error = error self.request = request Exception.__init__(self, error)",
"error) def __str__(self): return \"APIError: %s: %s, request: %s\" %",
"def __str__(self): return \"APIError: %s: %s, request: %s\" % (",
"class APIError(Exception): \"\"\" raise APIError if receiving json message indicating",
"= request Exception.__init__(self, error) def __str__(self): return \"APIError: %s: %s,",
"= error self.request = request Exception.__init__(self, error) def __str__(self): return",
"self.error_code = error_code self.error = error self.request = request Exception.__init__(self,",
"%s: %s, request: %s\" % ( self.error_code, self.error, self.request, )",
"receiving json message indicating failure. \"\"\" def __init__(self, error_code, error,"
] |
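This row's shingles cover a small exception class completely, so it can be reassembled without guesswork; the docstring wording follows the corrected fragments above ::

    class APIError(Exception):
        """
        raise APIError when receiving a JSON message indicating failure.
        """
        def __init__(self, error_code, error, request):
            self.error_code = error_code
            self.error = error
            self.request = request
            Exception.__init__(self, error)

        def __str__(self):
            return "APIError: %s: %s, request: %s" % (
                self.error_code, self.error, self.request,
            )

Typical use, with hypothetical values: ``raise APIError(1001, 'invalid token', request_body)``; ``str(err)`` then yields "APIError: 1001: invalid token, request: ...".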
[
"# 物业岗亭机 Others = 10 ValidatedList = (1,2,3,4,5) class Constants(object):",
"InnerScreen = 2 # 主屏分离的室内屏 OuterBox = 3 # 室外机",
"1 # 主屏分离的室内主机 InnerScreen = 2 # 主屏分离的室内屏 OuterBox =",
"主屏分离的室内屏 OuterBox = 3 # 室外机 PropCallApp = 4 #",
"3 # 室外机 PropCallApp = 4 # 物业值守 PropSentryApp =",
"class SystemDeviceType(object): InnerBox = 1 # 主屏分离的室内主机 InnerScreen = 2",
"#coding:utf-8 class SystemDeviceType(object): InnerBox = 1 # 主屏分离的室内主机 InnerScreen =",
"= 4 # 物业值守 PropSentryApp = 5 # 物业岗亭机 Others",
"2 # 主屏分离的室内屏 OuterBox = 3 # 室外机 PropCallApp =",
"SystemDeviceType(object): InnerBox = 1 # 主屏分离的室内主机 InnerScreen = 2 #",
"= 1 # 主屏分离的室内主机 InnerScreen = 2 # 主屏分离的室内屏 OuterBox",
"物业岗亭机 Others = 10 ValidatedList = (1,2,3,4,5) class Constants(object): SUPER_ACCESS_TOKEN",
"# 物业值守 PropSentryApp = 5 # 物业岗亭机 Others = 10",
"# 主屏分离的室内主机 InnerScreen = 2 # 主屏分离的室内屏 OuterBox = 3",
"主屏分离的室内主机 InnerScreen = 2 # 主屏分离的室内屏 OuterBox = 3 #",
"Others = 10 ValidatedList = (1,2,3,4,5) class Constants(object): SUPER_ACCESS_TOKEN =",
"# 主屏分离的室内屏 OuterBox = 3 # 室外机 PropCallApp = 4",
"OuterBox = 3 # 室外机 PropCallApp = 4 # 物业值守",
"5 # 物业岗亭机 Others = 10 ValidatedList = (1,2,3,4,5) class",
"PropSentryApp = 5 # 物业岗亭机 Others = 10 ValidatedList =",
"物业值守 PropSentryApp = 5 # 物业岗亭机 Others = 10 ValidatedList",
"= 5 # 物业岗亭机 Others = 10 ValidatedList = (1,2,3,4,5)",
"室外机 PropCallApp = 4 # 物业值守 PropSentryApp = 5 #",
"PropCallApp = 4 # 物业值守 PropSentryApp = 5 # 物业岗亭机",
"InnerBox = 1 # 主屏分离的室内主机 InnerScreen = 2 # 主屏分离的室内屏",
"= 3 # 室外机 PropCallApp = 4 # 物业值守 PropSentryApp",
"# 室外机 PropCallApp = 4 # 物业值守 PropSentryApp = 5",
"= 10 ValidatedList = (1,2,3,4,5) class Constants(object): SUPER_ACCESS_TOKEN = '<KEY>'",
"<filename>pythonlibs/mantis/templates/webapp/src/webapp/base.py #coding:utf-8 class SystemDeviceType(object): InnerBox = 1 # 主屏分离的室内主机 InnerScreen",
"= 2 # 主屏分离的室内屏 OuterBox = 3 # 室外机 PropCallApp",
"4 # 物业值守 PropSentryApp = 5 # 物业岗亭机 Others ="
] |
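Reassembled, this row is a tiny device-type constants module. The inline comments were originally in Chinese and are rendered in English here, matching the translated fragments above; the `'<KEY>'` value is a redaction in the source and is left as-is ::

    #coding:utf-8

    class SystemDeviceType(object):
        InnerBox = 1       # indoor main unit with detached screen
        InnerScreen = 2    # indoor screen detached from main unit
        OuterBox = 3       # outdoor unit
        PropCallApp = 4    # property duty app
        PropSentryApp = 5  # property sentry booth unit
        Others = 10
        ValidatedList = (1,2,3,4,5)

    class Constants(object):
        SUPER_ACCESS_TOKEN = '<KEY>'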
[
"-*- coding: utf-8 -*- from django.conf.urls.defaults import patterns, url from",
"django_qbe.exports import formats urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$',",
"\"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results', name=\"qbe_results\"), url(r'^auto/$',",
"<filename>base/site-packages/django_qbe/urls.py # -*- coding: utf-8 -*- from django.conf.urls.defaults import patterns,",
"from django_qbe.exports import formats urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"),",
"urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$',",
"'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' %",
"patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"),",
"'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"),",
"# -*- coding: utf-8 -*- from django.conf.urls.defaults import patterns, url",
"url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results', name=\"qbe_results\"), url(r'^auto/$', 'qbe_autocomplete', name=\"qbe_autocomplete\"), )",
"% \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results', name=\"qbe_results\"),",
"name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$',",
"django.conf.urls.defaults import patterns, url from django_qbe.exports import formats urlpatterns =",
"'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"),",
"import formats urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js',",
"url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export',",
"url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results',",
"= patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark',",
"url from django_qbe.exports import formats urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form',",
"utf-8 -*- from django.conf.urls.defaults import patterns, url from django_qbe.exports import",
"name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$',",
"patterns, url from django_qbe.exports import formats urlpatterns = patterns('django_qbe.views', url(r'^$',",
"name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()),",
"url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$' % \"|\".join(formats.keys()), 'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy',",
"'qbe_export', name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results', name=\"qbe_results\"), url(r'^auto/$', 'qbe_autocomplete',",
"-*- from django.conf.urls.defaults import patterns, url from django_qbe.exports import formats",
"coding: utf-8 -*- from django.conf.urls.defaults import patterns, url from django_qbe.exports",
"formats urlpatterns = patterns('django_qbe.views', url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"),",
"url(r'^$', 'qbe_form', name=\"qbe_form\"), url(r'^js/$', 'qbe_js', name=\"qbe_js\"), url(r'^results/bookmark/$', 'qbe_bookmark', name=\"qbe_bookmark\"), url(r'^results/export/(?P<format>(%s))/$'",
"name=\"qbe_export\"), url(r'^results/proxy/$', 'qbe_proxy', name=\"qbe_proxy\"), url(r'^results/(?P<query_hash>(.*))/$', 'qbe_results', name=\"qbe_results\"), url(r'^auto/$', 'qbe_autocomplete', name=\"qbe_autocomplete\"),",
"import patterns, url from django_qbe.exports import formats urlpatterns = patterns('django_qbe.views',",
"from django.conf.urls.defaults import patterns, url from django_qbe.exports import formats urlpatterns"
] |
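The deduplicated urlconf above uses the old Django 1.x style: `patterns()` with string view names, imported from the long-removed `django.conf.urls.defaults` module. For context, a minimal sketch of how a project of that era might mount it; the `qbe/` prefix is an assumption for illustration, not taken from the source:

# Hypothetical project-level urls.py (same deprecated Django 1.x API).
from django.conf.urls.defaults import patterns, include, url

urlpatterns = patterns('',
    # 'qbe/' is an illustrative prefix; the named patterns above still
    # resolve with reverse(), e.g. reverse("qbe_form").
    url(r'^qbe/', include('django_qbe.urls')),
)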
# -*- coding:utf-8 -*-
# Author: RubanSeven

# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS


def distort(src, segment):
    img_h, img_w = src.shape[:2]

    cut = img_w // segment
    thresh = cut // 3
    # thresh = img_h // segment // 3
    # thresh = img_h // 5

    src_pts = list()
    dst_pts = list()

    src_pts.append([0, 0])
    src_pts.append([img_w, 0])
    src_pts.append([img_w, img_h])
    src_pts.append([0, img_h])

    dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
    dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
    dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
    dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])

    half_thresh = thresh * 0.5

    for cut_idx in np.arange(1, segment, 1):
        src_pts.append([cut * cut_idx, 0])
        src_pts.append([cut * cut_idx, img_h])
        dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
                        np.random.randint(thresh) - half_thresh])
        dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
                        img_h + np.random.randint(thresh) - half_thresh])

    trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
    dst = trans.generate()

    return dst


def stretch(src, segment):
    img_h, img_w = src.shape[:2]

    cut = img_w // segment
    thresh = cut * 4 // 5
    # thresh = img_h // segment // 3
    # thresh = img_h // 5

    src_pts = list()
    dst_pts = list()

    src_pts.append([0, 0])
    src_pts.append([img_w, 0])
    src_pts.append([img_w, img_h])
    src_pts.append([0, img_h])

    dst_pts.append([0, 0])
    dst_pts.append([img_w, 0])
    dst_pts.append([img_w, img_h])
    dst_pts.append([0, img_h])

    half_thresh = thresh * 0.5

    for cut_idx in np.arange(1, segment, 1):
        move = np.random.randint(thresh) - half_thresh
        src_pts.append([cut * cut_idx, 0])
        src_pts.append([cut * cut_idx, img_h])
        dst_pts.append([cut * cut_idx + move, 0])
        dst_pts.append([cut * cut_idx + move, img_h])

    trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
    dst = trans.generate()

    return dst


def perspective(src):
    img_h, img_w = src.shape[:2]

    thresh = img_h // 2

    src_pts = list()
    dst_pts = list()

    src_pts.append([0, 0])
    src_pts.append([img_w, 0])
    src_pts.append([img_w, img_h])
    src_pts.append([0, img_h])

    dst_pts.append([0, np.random.randint(thresh)])
    dst_pts.append([img_w, np.random.randint(thresh)])
    dst_pts.append([img_w, img_h - np.random.randint(thresh)])
    dst_pts.append([0, img_h - np.random.randint(thresh)])

    trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
    dst = trans.generate()

    return dst


# def distort(src, segment):
#     img_h, img_w = src.shape[:2]
#     dst = np.zeros_like(src, dtype=np.uint8)
#
#     cut = img_w // segment
#     thresh = img_h // 8
#
#     src_pts = list()
#
#     dst_pts = list()
#
#     src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
#     src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
#     # dst_pts.append([0, 0])
#     # dst_pts.append([0, img_h])
#     dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
#     half_thresh = thresh * 0.5
#
#     for cut_idx in np.arange(1, segment, 1):
#         src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
#                         np.random.randint(thresh) - half_thresh])
#         src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
#                         img_h + np.random.randint(thresh) - half_thresh])
#
#         # dst_pts.append([cut * i, 0])
#         # dst_pts.append([cut * i, img_h])
#
#         src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
#         # mat = cv2.getPerspectiveTransform(src_box, dst_box)
#         # print(mat)
#         # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
#         mat = get_perspective_transform(dst_box, src_box)
#         dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
#         # print(mat)
#
#     src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
#                     np.random.randint(thresh) - half_thresh])
#     src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
#                     img_h + np.random.randint(thresh) - half_thresh])
#     src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
#     # mat = cv2.getPerspectiveTransform(src_box, dst_box)
#     # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
#     mat = get_perspective_transform(dst_box, src_box)
#     dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
#     return dst
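A minimal usage sketch for the three augmentations above, assuming the `warp_mls` module providing `WarpMLS` is importable alongside this file and OpenCV is installed; the image path is hypothetical. Note that `np.random.randint(thresh)` requires `thresh > 0`, so the input must be wide enough (for `distort`/`stretch`) or tall enough (for `perspective`):

# Hypothetical driver for the augmentations above.
import cv2

img = cv2.imread('word.png')             # hypothetical text-line image
out_distort = distort(img, segment=4)    # jitter both x and y at each cut
out_stretch = stretch(img, segment=4)    # shift cut points horizontally only
out_persp = perspective(img)             # raise/lower the top and bottom edges

cv2.imwrite('word_distort.png', out_distort)
cv2.imwrite('word_stretch.png', out_stretch)
cv2.imwrite('word_perspective.png', out_persp)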
from collections import defaultdict


class Graph:
    def __init__(self):
        self.graph = defaultdict(list)

    def addEdge(self, starting_vertex, end_vertex):
        self.graph[starting_vertex].append(end_vertex)

    def printAllPaths(self, starting_vertex, target_vertex):
        visitedVertices = defaultdict(bool)
        self.resultPaths = []
        self.dfsUtil(starting_vertex, visitedVertices, target_vertex, "")
        return self.resultPaths

    def dfsUtil(self, current_vertex, visitedVertices, target_vertex, output_string):
        visitedVertices[current_vertex] = True
        if output_string == "":
            output_string = current_vertex
        else:
            output_string = output_string + "->" + current_vertex
        if current_vertex == target_vertex:
            self.resultPaths.append(output_string)
            return
        for vertex in self.graph[current_vertex]:
            if visitedVertices[vertex] == False:
                self.dfsUtil(vertex, visitedVertices, target_vertex, output_string)
                visitedVertices[vertex] = False


if __name__ == "__main__":
    g = Graph()
    g.addEdge("A", "B")
    g.addEdge("B", "D")
    g.addEdge("A", "D")
    g.addEdge("C", "A")
    g.addEdge("C", "B")
    g.addEdge("A", "C")
    paths = g.printAllPaths("A", "B")
    print(paths)
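Run as-is, the `__main__` block above should print `['A->B', 'A->C->B']`: the direct edge plus the detour through C (the branch via D dead-ends, and the backtracking line `visitedVertices[vertex] = False` un-marks each child on return so it can appear on later paths). A small extra sanity check; the diamond graph below is illustrative, not from the source:

# Hypothetical check: a diamond A->B->D / A->C->D has exactly two A-to-D paths.
h = Graph()
h.addEdge("A", "B")
h.addEdge("B", "D")
h.addEdge("A", "C")
h.addEdge("C", "D")
assert h.printAllPaths("A", "D") == ["A->B->D", "A->C->D"]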
# testing the output of pipegeojson against different input types
import berrl as bl
import itertools

# making line with csv file location
line1 = bl.make_line('csvs/line_example.csv')

# making line with list
testlist = bl.read('csvs/line_example.csv')
line2 = bl.make_line(testlist, list=True)

# testing each line geojson against the other
ind = 0
for a, b in itertools.izip(line1, line2):
    if not a == b:
        ind = 1

# carrying the passing status down to the test for the rest
if ind == 0:
    passing = 0
else:
    passing = 1

# making points with csv file location
points1 = bl.make_line('csvs/points_example.csv')

# making points with list
testlist = bl.read('csvs/points_example.csv')
points2 = bl.make_line(testlist, list=True)

# testing each points geojson against the other
ind = 0
for a, b in itertools.izip(points1, points2):
    if not a == b:
        ind = 1

# carrying the passing status down to the test for the rest
if ind == 0 and passing == 0:
    passing = 0
else:
    passing = 1

# making blocks with csv file location
blocks1 = bl.make_line('csvs/blocks_example.csv')

# making blocks with list
testlist = bl.read('csvs/blocks_example.csv')
blocks2 = bl.make_line(testlist, list=True)

# testing each blocks geojson against the other
ind = 0
for a, b in itertools.izip(blocks1, blocks2):
    if not a == b:
        ind = 1

# carrying the passing status down to the test for the rest
if ind == 0 and passing == 0:
    passing = 0
else:
    passing = 1

# making polygons with csv file location
polygon1 = bl.make_line('csvs/polygon_example.csv')

# making polygons with list
testlist = bl.read('csvs/polygon_example.csv')
polygon2 = bl.make_line(testlist, list=True)

# testing each polygon geojson against the other
ind = 0
for a, b in itertools.izip(polygon1, polygon2):
    if not a == b:
        ind = 1

# carrying the passing status down to the test for the rest
if ind == 0 and passing == 0:
    passing = 0
else:
    passing = 1

# printing output result
if passing == 0:
    print 'pipegeojson build passed'
else:
    print 'pipegeojson build failed'
"polygon2=bl.make_line(testlist,list=True) # testing each bloocks geojson against each other ind=0",
"with csv file location points1=bl.make_line('csvs/points_example.csv') # making points with list",
"# printing output result if passing==0: print 'pipegeojson build passed'",
"of pipegeojson against different input types import berrl as bl",
"file location polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks with list testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True)",
"ind=1 # carrying the passing of status down to the",
"each line geojson against each other ind=0 for a,b in",
"points2=bl.make_line(testlist,list=True) # testing each points geojson against each other ind=0",
"testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) # testing each bloocks geojson against each other",
"passing=1 # printing output result if passing==0: print 'pipegeojson build",
"passing=0 else: passing=1 # printing output result if passing==0: print",
"making blocks with csv file location polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks",
"each other ind=0 for a,b in itertools.izip(points1,points2): if not a==b:",
"the rest if ind==0 and passing==0: passing=0 else: passing=1 #",
"if ind==0 and passing==0: passing=0 else: passing=1 # printing output",
"testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True) # testing each bloocks geojson against each other",
"passing of status down to the test for the rest",
"passing==0: passing=0 else: passing=1 # printing output result if passing==0:",
"# making line with list testlist=bl.read('csvs/line_example.csv') line2=bl.make_line(testlist,list=True) # testing each",
"making blocks with csv file location blocks1=bl.make_line('csvs/blocks_example.csv') # making blocks",
"line2=bl.make_line(testlist,list=True) # testing each line geojson against each other ind=0",
"a,b in itertools.izip(line1,line2): if not a==b: ind=1 # carrying the",
"if ind==0 and passing==0: passing=0 else: passing=1 # making blocks",
"each other ind=0 for a,b in itertools.izip(line1,line2): if not a==b:",
"if not a==b: ind=1 # carrying the passing of status",
"to the test for the rest if ind==0: passing=0 else:",
"location polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks with list testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True) #",
"against each other ind=0 for a,b in itertools.izip(points1,points2): if not",
"list testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True) # testing each bloocks geojson against each",
"not a==b: ind=1 # carrying the passing of status down",
"else: passing=1 # making blocks with csv file location polygon1=bl.make_line('csvs/polygon_example.csv')",
"geojson against each other ind=0 for a,b in itertools.izip(polygon1,polygon2): if",
"bl import itertools # making line with csv file location",
"ind==0 and passing==0: passing=0 else: passing=1 # printing output result",
"# testing the output of pipegeojson against different input types",
"line1=bl.make_line('csvs/line_example.csv') # making line with list testlist=bl.read('csvs/line_example.csv') line2=bl.make_line(testlist,list=True) # testing",
"other ind=0 for a,b in itertools.izip(points1,points2): if not a==b: ind=1",
"result if passing==0: print 'pipegeojson build passed' else: print 'pipegeojson",
"passing=1 # making blocks with csv file location blocks1=bl.make_line('csvs/blocks_example.csv') #",
"points geojson against each other ind=0 for a,b in itertools.izip(points1,points2):",
"location blocks1=bl.make_line('csvs/blocks_example.csv') # making blocks with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) #",
"csv file location polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks with list testlist=bl.read('csvs/polygon_example.csv')",
"bloocks geojson against each other ind=0 for a,b in itertools.izip(blocks1,blocks2):",
"a,b in itertools.izip(blocks1,blocks2): if not a==b: ind=1 # carrying the",
"making blocks with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) # testing each bloocks",
"blocks2=bl.make_line(testlist,list=True) # testing each bloocks geojson against each other ind=0",
"in itertools.izip(blocks1,blocks2): if not a==b: ind=1 # carrying the passing",
"line with csv file location line1=bl.make_line('csvs/line_example.csv') # making line with",
"each bloocks geojson against each other ind=0 for a,b in",
"the test for the rest if ind==0: passing=0 else: passing=1",
"geojson against each other ind=0 for a,b in itertools.izip(points1,points2): if",
"in itertools.izip(line1,line2): if not a==b: ind=1 # carrying the passing",
"passing==0: print 'pipegeojson build passed' else: print 'pipegeojson build failed'",
"points1=bl.make_line('csvs/points_example.csv') # making points with list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing",
"if passing==0: print 'pipegeojson build passed' else: print 'pipegeojson build",
"# testing each points geojson against each other ind=0 for",
"line geojson against each other ind=0 for a,b in itertools.izip(line1,line2):",
"and passing==0: passing=0 else: passing=1 # printing output result if",
"each points geojson against each other ind=0 for a,b in",
"else: passing=1 # printing output result if passing==0: print 'pipegeojson",
"a==b: ind=1 # carrying the passing of status down to",
"# making blocks with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) # testing each",
"input types import berrl as bl import itertools # making",
"with csv file location blocks1=bl.make_line('csvs/blocks_example.csv') # making blocks with list",
"# making blocks with list testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True) # testing each",
"types import berrl as bl import itertools # making line",
"the test for the rest if ind==0 and passing==0: passing=0",
"a,b in itertools.izip(polygon1,polygon2): if not a==b: ind=1 # carrying the",
"# making blocks with csv file location blocks1=bl.make_line('csvs/blocks_example.csv') # making",
"# testing each line geojson against each other ind=0 for",
"passing=0 else: passing=1 # making blocks with csv file location",
"else: passing=1 # making points with csv file location points1=bl.make_line('csvs/points_example.csv')",
"# making blocks with csv file location polygon1=bl.make_line('csvs/polygon_example.csv') # making",
"file location blocks1=bl.make_line('csvs/blocks_example.csv') # making blocks with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True)",
"blocks1=bl.make_line('csvs/blocks_example.csv') # making blocks with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) # testing",
"rest if ind==0: passing=0 else: passing=1 # making points with",
"for a,b in itertools.izip(line1,line2): if not a==b: ind=1 # carrying",
"testing each bloocks geojson against each other ind=0 for a,b",
"output of pipegeojson against different input types import berrl as",
"of status down to the test for the rest if",
"list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing each points geojson against each",
"with csv file location polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks with list",
"ind=0 for a,b in itertools.izip(line1,line2): if not a==b: ind=1 #",
"making points with list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing each points",
"# making points with list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing each",
"file location line1=bl.make_line('csvs/line_example.csv') # making line with list testlist=bl.read('csvs/line_example.csv') line2=bl.make_line(testlist,list=True)",
"points with csv file location points1=bl.make_line('csvs/points_example.csv') # making points with",
"ind=0 for a,b in itertools.izip(points1,points2): if not a==b: ind=1 #",
"with list testlist=bl.read('csvs/blocks_example.csv') blocks2=bl.make_line(testlist,list=True) # testing each bloocks geojson against",
"list testlist=bl.read('csvs/line_example.csv') line2=bl.make_line(testlist,list=True) # testing each line geojson against each",
"passing=0 else: passing=1 # making points with csv file location",
"carrying the passing of status down to the test for",
"else: passing=1 # making blocks with csv file location blocks1=bl.make_line('csvs/blocks_example.csv')",
"printing output result if passing==0: print 'pipegeojson build passed' else:",
"test for the rest if ind==0: passing=0 else: passing=1 #",
"making line with list testlist=bl.read('csvs/line_example.csv') line2=bl.make_line(testlist,list=True) # testing each line",
"each other ind=0 for a,b in itertools.izip(polygon1,polygon2): if not a==b:",
"csv file location line1=bl.make_line('csvs/line_example.csv') # making line with list testlist=bl.read('csvs/line_example.csv')",
"against each other ind=0 for a,b in itertools.izip(polygon1,polygon2): if not",
"points with list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing each points geojson",
"polygon1=bl.make_line('csvs/polygon_example.csv') # making blocks with list testlist=bl.read('csvs/polygon_example.csv') polygon2=bl.make_line(testlist,list=True) # testing",
"bloocks geojson against each other ind=0 for a,b in itertools.izip(polygon1,polygon2):",
"with list testlist=bl.read('csvs/points_example.csv') points2=bl.make_line(testlist,list=True) # testing each points geojson against",
"and passing==0: passing=0 else: passing=1 # making blocks with csv",
"if ind==0: passing=0 else: passing=1 # making points with csv"
] |
[
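The script is Python 2 throughout (`itertools.izip`, `print` statements). `izip` was removed in Python 3 because the built-in `zip` became lazy; if the test ever has to run there, a small shim keeps the comparison loops unchanged (a sketch; whether `berrl` itself supports Python 3 is a separate question):

try:
    from itertools import izip  # Python 2
except ImportError:
    izip = zip  # Python 3: zip is already an iterator

# the loops then read: for a, b in izip(line1, line2): ...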
"-*- \"\"\" Created on Mon May 8 16:19:07 2017 @author:",
"= original return vid all_threads = [] for ii, video_file",
"fnames = glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames = [x for x",
"for x in fnames if not x.endswith('_seg.avi')] fnames = sorted(fnames)",
"Mon May 8 16:19:07 2017 @author: ajaver \"\"\" import os",
"x.endswith('_seg.avi')] fnames = sorted(fnames) def get_and_release(video_file): original = sys.stderr f",
"import sys import glob import threading from functools import partial",
"@author: ajaver \"\"\" import os import cv2 import sys import",
"if not x.endswith('_seg.avi')] fnames = sorted(fnames) def get_and_release(video_file): original =",
"import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**', '*.avi'))",
"-*- coding: utf-8 -*- \"\"\" Created on Mon May 8",
"Created on Mon May 8 16:19:07 2017 @author: ajaver \"\"\"",
"16:19:07 2017 @author: ajaver \"\"\" import os import cv2 import",
"original = sys.stderr f = open(os.devnull, 'w') sys.stderr = f",
"= glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames = [x for x in",
"def get_and_release(video_file): original = sys.stderr f = open(os.devnull, 'w') sys.stderr",
"May 8 16:19:07 2017 @author: ajaver \"\"\" import os import",
"return vid all_threads = [] for ii, video_file in enumerate(fnames):",
"ii, video_file in enumerate(fnames): print(ii, video_file) vid = cv2.VideoCapture(video_file) vid.release()",
"[x for x in fnames if not x.endswith('_seg.avi')] fnames =",
"= [] for ii, video_file in enumerate(fnames): print(ii, video_file) vid",
"glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames = [x for x in fnames",
"main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames =",
"cv2 import sys import glob import threading from functools import",
"on Mon May 8 16:19:07 2017 @author: ajaver \"\"\" import",
"cv2.VideoCapture(video_file) vid.release() sys.stderr = original return vid all_threads = []",
"import os import cv2 import sys import glob import threading",
"sys.stderr = f print('here') vid = cv2.VideoCapture(video_file) vid.release() sys.stderr =",
"vid.release() sys.stderr = original return vid all_threads = [] for",
"python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon",
"= open(os.devnull, 'w') sys.stderr = f print('here') vid = cv2.VideoCapture(video_file)",
"cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target = partial(get_and_release, video_file)) t.start() all_threads.append((video_file,",
"fnames = [x for x in fnames if not x.endswith('_seg.avi')]",
"fnames = sorted(fnames) def get_and_release(video_file): original = sys.stderr f =",
"#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on",
"vid.release() t = threading.Thread(target = partial(get_and_release, video_file)) t.start() all_threads.append((video_file, t))",
"coding: utf-8 -*- \"\"\" Created on Mon May 8 16:19:07",
"'*.avi')) fnames = [x for x in fnames if not",
"utf-8 -*- \"\"\" Created on Mon May 8 16:19:07 2017",
"in enumerate(fnames): print(ii, video_file) vid = cv2.VideoCapture(video_file) vid.release() t =",
"sys.stderr f = open(os.devnull, 'w') sys.stderr = f print('here') vid",
"glob import threading from functools import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/'",
"\"\"\" Created on Mon May 8 16:19:07 2017 @author: ajaver",
"vid all_threads = [] for ii, video_file in enumerate(fnames): print(ii,",
"not x.endswith('_seg.avi')] fnames = sorted(fnames) def get_and_release(video_file): original = sys.stderr",
"vid = cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target = partial(get_and_release, video_file))",
"sys import glob import threading from functools import partial main_dir",
"ajaver \"\"\" import os import cv2 import sys import glob",
"x in fnames if not x.endswith('_seg.avi')] fnames = sorted(fnames) def",
"'**', '*.avi')) fnames = [x for x in fnames if",
"video_file in enumerate(fnames): print(ii, video_file) vid = cv2.VideoCapture(video_file) vid.release() t",
"threading from functools import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames =",
"8 16:19:07 2017 @author: ajaver \"\"\" import os import cv2",
"= '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames = [x",
"# -*- coding: utf-8 -*- \"\"\" Created on Mon May",
"os import cv2 import sys import glob import threading from",
"sys.stderr = original return vid all_threads = [] for ii,",
"= cv2.VideoCapture(video_file) vid.release() sys.stderr = original return vid all_threads =",
"print(ii, video_file) vid = cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target =",
"in fnames if not x.endswith('_seg.avi')] fnames = sorted(fnames) def get_and_release(video_file):",
"original return vid all_threads = [] for ii, video_file in",
"import glob import threading from functools import partial main_dir =",
"= [x for x in fnames if not x.endswith('_seg.avi')] fnames",
"2017 @author: ajaver \"\"\" import os import cv2 import sys",
"'/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames = [x for",
"sorted(fnames) def get_and_release(video_file): original = sys.stderr f = open(os.devnull, 'w')",
"= cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target = partial(get_and_release, video_file)) t.start()",
"for ii, video_file in enumerate(fnames): print(ii, video_file) vid = cv2.VideoCapture(video_file)",
"<filename>tierpsy/debugging/catch_infinite_loop.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created",
"import cv2 import sys import glob import threading from functools",
"partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**', '*.avi')) fnames",
"f = open(os.devnull, 'w') sys.stderr = f print('here') vid =",
"video_file) vid = cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target = partial(get_and_release,",
"enumerate(fnames): print(ii, video_file) vid = cv2.VideoCapture(video_file) vid.release() t = threading.Thread(target",
"= sorted(fnames) def get_and_release(video_file): original = sys.stderr f = open(os.devnull,",
"[] for ii, video_file in enumerate(fnames): print(ii, video_file) vid =",
"functools import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir, '**',",
"= f print('here') vid = cv2.VideoCapture(video_file) vid.release() sys.stderr = original",
"f print('here') vid = cv2.VideoCapture(video_file) vid.release() sys.stderr = original return",
"open(os.devnull, 'w') sys.stderr = f print('here') vid = cv2.VideoCapture(video_file) vid.release()",
"= sys.stderr f = open(os.devnull, 'w') sys.stderr = f print('here')",
"'w') sys.stderr = f print('here') vid = cv2.VideoCapture(video_file) vid.release() sys.stderr",
"from functools import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames = glob.glob(os.path.join(main_dir,",
"fnames if not x.endswith('_seg.avi')] fnames = sorted(fnames) def get_and_release(video_file): original",
"vid = cv2.VideoCapture(video_file) vid.release() sys.stderr = original return vid all_threads",
"\"\"\" import os import cv2 import sys import glob import",
"print('here') vid = cv2.VideoCapture(video_file) vid.release() sys.stderr = original return vid",
"all_threads = [] for ii, video_file in enumerate(fnames): print(ii, video_file)",
"get_and_release(video_file): original = sys.stderr f = open(os.devnull, 'w') sys.stderr =",
"import threading from functools import partial main_dir = '/Volumes/behavgenom_archive$/Celine/raw/' fnames"
] |
[
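The worker threads are collected in `all_threads` but never joined, so a hang is only visible as a missing 'here' in the output. To flag the offending file explicitly, each thread can be joined with a timeout (a sketch, not part of the original script; the 10-second cutoff is arbitrary):

for video_file, t in all_threads:
    t.join(timeout=10)   # give the open/release cycle 10 s to finish
    if t.is_alive():     # still running -> VideoCapture is stuck
        print('possible infinite loop while opening:', video_file)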
"devito.ir import Call from devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase import",
"Call('posix_memalign', (i, j, k)), 'host-free': lambda i: Call('free', (i,)), }",
"lambda i, j, k: Call('posix_memalign', (i, j, k)), 'host-free': lambda",
"j, k: Call('posix_memalign', (i, j, k)), 'host-free': lambda i: Call('free',",
"devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase import LangBB __all__ = ['CBB',",
"from devito.passes.iet.langbase import LangBB __all__ = ['CBB', 'CDataManager'] class CBB(LangBB):",
"'__attribute__((aligned(%d)))' % i, 'host-alloc': lambda i, j, k: Call('posix_memalign', (i,",
"Call from devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase import LangBB __all__",
"mapper = { 'aligned': lambda i: '__attribute__((aligned(%d)))' % i, 'host-alloc':",
"class CBB(LangBB): mapper = { 'aligned': lambda i: '__attribute__((aligned(%d)))' %",
"'host-alloc': lambda i, j, k: Call('posix_memalign', (i, j, k)), 'host-free':",
"% i, 'host-alloc': lambda i, j, k: Call('posix_memalign', (i, j,",
"CBB(LangBB): mapper = { 'aligned': lambda i: '__attribute__((aligned(%d)))' % i,",
"i: '__attribute__((aligned(%d)))' % i, 'host-alloc': lambda i, j, k: Call('posix_memalign',",
"'host-free': lambda i: Call('free', (i,)), } class CDataManager(DataManager): lang =",
"k: Call('posix_memalign', (i, j, k)), 'host-free': lambda i: Call('free', (i,)),",
"k)), 'host-free': lambda i: Call('free', (i,)), } class CDataManager(DataManager): lang",
"i, 'host-alloc': lambda i, j, k: Call('posix_memalign', (i, j, k)),",
"import Call from devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase import LangBB",
"= { 'aligned': lambda i: '__attribute__((aligned(%d)))' % i, 'host-alloc': lambda",
"lambda i: Call('free', (i,)), } class CDataManager(DataManager): lang = CBB",
"'aligned': lambda i: '__attribute__((aligned(%d)))' % i, 'host-alloc': lambda i, j,",
"LangBB __all__ = ['CBB', 'CDataManager'] class CBB(LangBB): mapper = {",
"from devito.ir import Call from devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase",
"__all__ = ['CBB', 'CDataManager'] class CBB(LangBB): mapper = { 'aligned':",
"devito.passes.iet.langbase import LangBB __all__ = ['CBB', 'CDataManager'] class CBB(LangBB): mapper",
"import DataManager from devito.passes.iet.langbase import LangBB __all__ = ['CBB', 'CDataManager']",
"{ 'aligned': lambda i: '__attribute__((aligned(%d)))' % i, 'host-alloc': lambda i,",
"lambda i: '__attribute__((aligned(%d)))' % i, 'host-alloc': lambda i, j, k:",
"DataManager from devito.passes.iet.langbase import LangBB __all__ = ['CBB', 'CDataManager'] class",
"'CDataManager'] class CBB(LangBB): mapper = { 'aligned': lambda i: '__attribute__((aligned(%d)))'",
"from devito.passes.iet.definitions import DataManager from devito.passes.iet.langbase import LangBB __all__ =",
"= ['CBB', 'CDataManager'] class CBB(LangBB): mapper = { 'aligned': lambda",
"(i, j, k)), 'host-free': lambda i: Call('free', (i,)), } class",
"import LangBB __all__ = ['CBB', 'CDataManager'] class CBB(LangBB): mapper =",
"j, k)), 'host-free': lambda i: Call('free', (i,)), } class CDataManager(DataManager):",
"i, j, k: Call('posix_memalign', (i, j, k)), 'host-free': lambda i:",
"['CBB', 'CDataManager'] class CBB(LangBB): mapper = { 'aligned': lambda i:"
] |
[
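The mapper values are plain callables, so the C-level constructs they produce are easy to inspect (a sketch; 'ptr' is an illustrative placeholder for the IET symbols devito normally passes in):

CBB.mapper['aligned'](64)
# -> '__attribute__((aligned(64)))'  -- a raw C attribute string

CBB.mapper['host-free']('ptr')
# -> Call('free', ('ptr',))  -- an IET Call node, printed as free(ptr);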
"_filter, _filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1 +",
"= h1 # return (im @ h1.tensor(h2).H).P @ h1.tensor(h2) return",
"assert True def test_quantize(): im = ImageRGB.open('src/lenna.jpg') d = im.quantize(128)",
"@ _filter assert True def test_rec(): im = ImageRGB.open('src/lenna.jpg') def",
"if h2 is None: h2 = h1 # return (im",
"True def test_filter(): im = ImageRGB.open('src/lenna.jpg') rec = (im @",
"_filter, _filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1.expand(_filter) +",
"(im @ _filter.H).D # print(f\"{d:i}, {d.shape}\") assert True def test_filter():",
"assert True def test_rec3(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1,",
"assert True def test_convolve(): im = ImageRGB.open('src/lenna.jpg') d = (im",
"(im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec = _f(im, _filter) +",
"_filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1,",
"about image process Make sure the existance of the images",
"d.to_image() assert True def test_quantize(): im = ImageRGB.open('src/lenna.jpg') d =",
"h2 is None: h2 = h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1,",
"(im @ _filter.H).D.U @ _filter assert True def test_rec(): im",
"assert True def test_rec(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1,",
"None: h2 = h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)",
"im1 = im.reduce(_filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H)",
"* import numpy as np _filter = Filter.from_name('db4') def test_resize():",
"(im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 = _f(im, _filter) rec1",
"= _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H)",
"_f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter)",
"= im.quantize(128) d.to_image() assert True def test_convolve(): im = ImageRGB.open('src/lenna.jpg')",
"@ _filter.H).D # print(f\"{d:i}, {d.shape}\") assert True def test_filter(): im",
"return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 = _f(im, _filter)",
"axis=1) rec = _f(im, _filter) + _f(im, _filter.H) + _f(im,",
"_filter) assert True def test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def _f(im,",
"im1 = _f(im, _filter) rec1 = _f(im1, _filter) + _f(im1,",
"True def test_rec(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None):",
"np _filter = Filter.from_name('db4') def test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg')",
"_filter, _filter.H) + _f(im, _filter.H, _filter) assert True def test_rec3():",
"from ell import * import numpy as np _filter =",
"ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True def test_quantize(): im =",
"True def test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None):",
"the existance of the images \"\"\" from ell import *",
"= (im @ _filter.H).D.U @ _filter assert True def test_rec():",
"methods about image process Make sure the existance of the",
"True def test_rec3(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None):",
"_filter.H) + _f(im, _filter.H, _filter) assert True def test_rec2(): im",
"def test_rec3(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if",
"def test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None): if",
"_filter) rec2 = rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter,",
"= ImageRGB.open('src/lenna.jpg') d = (im @ _filter.H).D # print(f\"{d:i}, {d.shape}\")",
"_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im,",
"_f(im1, _filter.H, _filter) rec2 = rec1 + _f(im, _filter.H) +",
"= h1.tensor(h2) return im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1 = _f(im1,",
"@ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 =",
"rec = (im @ _filter.H).D.U @ _filter assert True def",
"rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im,",
"None: h2 = h1 f = h1.tensor(h2) return im.reduce(f).expand(f) im1",
"h1 f = h1.tensor(h2) return im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1",
"if h2 is None: h2 = h1 f = h1.tensor(h2)",
"ImageRGB.open('src/lenna.jpg') d = (im @ _filter.H).D # print(f\"{d:i}, {d.shape}\") assert",
"_filter, _filter.H) + _f(im, _filter.H, _filter) assert True def test_rec2():",
"is None: h2 = h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2,",
"assert True def test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def _f(im, h1,",
"h2=None): if h2 is None: h2 = h1 # return",
"ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image() assert True def test_convolve(): im",
"= rec1 + _f(im, _filter.H) + _f(im, _filter, _filter.H) +",
"process Make sure the existance of the images \"\"\" from",
"h2 = h1 f = h1.tensor(h2) return im.reduce(f).expand(f) im1 =",
"_f(im1, _filter.H, _filter) rec2 = rec1.expand(_filter) + _f(im, _filter.H) +",
"rec = _f(im, _filter) + _f(im, _filter.H) + _f(im, _filter,",
"images \"\"\" from ell import * import numpy as np",
"h2=None): if h2 is None: h2 = h1 return (im.conv1d(h1.H,",
"= h1 f = h1.tensor(h2) return im.reduce(f).expand(f) im1 = im.reduce(_filter)",
"_f(im, _filter, _filter.H) + _f(im, _filter.H, _filter) assert True def",
"_filter.H).D.U @ _filter assert True def test_rec(): im = ImageRGB.open('src/lenna.jpg')",
"h2 is None: h2 = h1 f = h1.tensor(h2) return",
"def _f(im, h1, h2=None): if h2 is None: h2 =",
"h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1",
"d = (im @ _filter.H).D # print(f\"{d:i}, {d.shape}\") assert True",
"as np _filter = Filter.from_name('db4') def test_resize(): chennal=0 c =",
"rec2 = rec1 + _f(im, _filter.H) + _f(im, _filter, _filter.H)",
"h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec = _f(im,",
"h1, h2=None): if h2 is None: h2 = h1 f",
"h1, h2=None): if h2 is None: h2 = h1 #",
"_filter.H, _filter) rec2 = rec1 + _f(im, _filter.H) + _f(im,",
"print(f\"{d:i}, {d.shape}\") assert True def test_filter(): im = ImageRGB.open('src/lenna.jpg') rec",
"\"\"\" from ell import * import numpy as np _filter",
"return im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1 = _f(im1, _filter) +",
"im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if h2 is",
"ell import * import numpy as np _filter = Filter.from_name('db4')",
"_filter.H, _filter) assert True def test_rec3(): im = ImageRGB.open('src/lenna.jpg') def",
"rec2 = rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H)",
"_f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) +",
"_filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter) rec2",
"_filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1.expand(_filter) + _f(im,",
"f = h1.tensor(h2) return im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1 =",
"True def test_quantize(): im = ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image()",
"im.quantize(128) d.to_image() assert True def test_convolve(): im = ImageRGB.open('src/lenna.jpg') d",
"test_rec3(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if h2",
"im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1 = _f(im1, _filter) + _f(im1,",
"# print(f\"{d:i}, {d.shape}\") assert True def test_filter(): im = ImageRGB.open('src/lenna.jpg')",
"+ _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter) rec2 =",
"im = ImageRGB.open('src/lenna.jpg') d = (im @ _filter.H).D # print(f\"{d:i},",
"_f(im, h1, h2=None): if h2 is None: h2 = h1",
"= ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True def test_quantize(): im",
"= ImageRGB.open('src/lenna.jpg') rec = (im @ _filter.H).D.U @ _filter assert",
"= _f(im, _filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H)",
"_f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)",
"_filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter) assert",
"_f(im, _filter.H, _filter) assert True def test_rec2(): im = ImageRGB.open('../src/lenna.jpg')",
"d = im.quantize(128) d.to_image() assert True def test_convolve(): im =",
"h1.tensor(h2) return im.reduce(f).expand(f) im1 = im.reduce(_filter) rec1 = _f(im1, _filter)",
"axis=0).conv1d(h2, axis=1) rec = _f(im, _filter) + _f(im, _filter.H) +",
"def test_quantize(): im = ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image() assert",
"= Filter.from_name('db4') def test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100))",
"test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None): if h2",
"test_filter(): im = ImageRGB.open('src/lenna.jpg') rec = (im @ _filter.H).D.U @",
"h2 is None: h2 = h1 # return (im @",
"axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 = _f(im, _filter) rec1 = _f(im1,",
"axis=0).conv1d(h2, axis=1) im1 = _f(im, _filter) rec1 = _f(im1, _filter)",
"rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter,",
"\"\"\"Test methods about image process Make sure the existance of",
"test_rec(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if h2",
"_f(im, _filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) +",
"h2=None): if h2 is None: h2 = h1 f =",
"c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True def test_quantize():",
"assert True def test_filter(): im = ImageRGB.open('src/lenna.jpg') rec = (im",
"+ _f(im1, _filter.H, _filter) rec2 = rec1 + _f(im, _filter.H)",
"Make sure the existance of the images \"\"\" from ell",
"the images \"\"\" from ell import * import numpy as",
"im = ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None): if h2 is",
"test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True",
"_filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1 + _f(im,",
"of the images \"\"\" from ell import * import numpy",
"import * import numpy as np _filter = Filter.from_name('db4') def",
"True def test_convolve(): im = ImageRGB.open('src/lenna.jpg') d = (im @",
"= h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec =",
"h2 = h1 # return (im @ h1.tensor(h2).H).P @ h1.tensor(h2)",
"h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 = _f(im,",
"test_quantize(): im = ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image() assert True",
"= im.reduce(_filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H) +",
"im.reduce(_filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1,",
"im = ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image() assert True def",
"d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True def test_quantize(): im = ImageRGB.open('src/lenna.jpg')",
"{d.shape}\") assert True def test_filter(): im = ImageRGB.open('src/lenna.jpg') rec =",
"= rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) +",
"ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None): if h2 is None: h2",
"_filter.H).D # print(f\"{d:i}, {d.shape}\") assert True def test_filter(): im =",
"@ h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)",
"chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert True def",
"axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) im1 = _f(im, _filter) rec1 =",
"return (im @ h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1,",
"sure the existance of the images \"\"\" from ell import",
"numpy as np _filter = Filter.from_name('db4') def test_resize(): chennal=0 c",
"import numpy as np _filter = Filter.from_name('db4') def test_resize(): chennal=0",
"maxInd=(100,100)) d.to_image() assert True def test_quantize(): im = ImageRGB.open('src/lenna.jpg') d",
"axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec = _f(im, _filter) + _f(im, _filter.H)",
"#!/usr/bin/env python3 \"\"\"Test methods about image process Make sure the",
"def test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image() assert",
"+ _f(im1, _filter.H, _filter) rec2 = rec1.expand(_filter) + _f(im, _filter.H)",
"_f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1.expand(_filter)",
"_f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter) rec2 = rec1",
"@ _filter.H).D.U @ _filter assert True def test_rec(): im =",
"_filter.H) + _f(im, _filter.H, _filter) assert True def test_rec3(): im",
"image process Make sure the existance of the images \"\"\"",
"ImageRGB.open('src/lenna.jpg') rec = (im @ _filter.H).D.U @ _filter assert True",
"= ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if h2 is None:",
"= ImageRGB.open('../src/lenna.jpg') def _f(im, h1, h2=None): if h2 is None:",
"_f(im, _filter.H, _filter) assert True def test_rec3(): im = ImageRGB.open('src/lenna.jpg')",
"rec1 + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im,",
"_filter.H, _filter) rec2 = rec1.expand(_filter) + _f(im, _filter.H) + _f(im,",
"axis=1) im1 = _f(im, _filter) rec1 = _f(im1, _filter) +",
"h1, h2=None): if h2 is None: h2 = h1 return",
"im = ImageRGB.open('src/lenna.jpg') rec = (im @ _filter.H).D.U @ _filter",
"_filter) rec2 = rec1 + _f(im, _filter.H) + _f(im, _filter,",
"= _f(im, _filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H)",
"(im @ h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2,",
"= (im @ _filter.H).D # print(f\"{d:i}, {d.shape}\") assert True def",
"None: h2 = h1 # return (im @ h1.tensor(h2).H).P @",
"d.to_image() assert True def test_convolve(): im = ImageRGB.open('src/lenna.jpg') d =",
"_filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1,",
"_f(im, _filter) rec1 = _f(im1, _filter) + _f(im1, _filter.H) +",
"python3 \"\"\"Test methods about image process Make sure the existance",
"if h2 is None: h2 = h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H,",
"h2 = h1 return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec",
"+ _f(im, _filter.H, _filter) assert True def test_rec2(): im =",
"_filter assert True def test_rec(): im = ImageRGB.open('src/lenna.jpg') def _f(im,",
"is None: h2 = h1 f = h1.tensor(h2) return im.reduce(f).expand(f)",
"+ _f(im, _filter.H, _filter) assert True def test_rec3(): im =",
"ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if h2 is None: h2",
"h1 # return (im @ h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H,",
"def test_rec(): im = ImageRGB.open('src/lenna.jpg') def _f(im, h1, h2=None): if",
"+ _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H,",
"axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec = _f(im, _filter) + _f(im,",
"+ _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter) assert True",
"_filter.H, _filter) assert True def test_rec2(): im = ImageRGB.open('../src/lenna.jpg') def",
"# return (im @ h1.tensor(h2).H).P @ h1.tensor(h2) return (im.conv1d(h1.H, axis=0).conv1d(h2.H,",
"_filter = Filter.from_name('db4') def test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100),",
"test_convolve(): im = ImageRGB.open('src/lenna.jpg') d = (im @ _filter.H).D #",
"return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1) rec = _f(im, _filter)",
"Filter.from_name('db4') def test_resize(): chennal=0 c = ImageRGB.open('src/lenna.jpg') d=c.resize(minInd=(-100,-100), maxInd=(100,100)) d.to_image()",
"= ImageRGB.open('src/lenna.jpg') d = im.quantize(128) d.to_image() assert True def test_convolve():",
"+ _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H,",
"existance of the images \"\"\" from ell import * import",
"def test_convolve(): im = ImageRGB.open('src/lenna.jpg') d = (im @ _filter.H).D",
"is None: h2 = h1 # return (im @ h1.tensor(h2).H).P",
"def test_filter(): im = ImageRGB.open('src/lenna.jpg') rec = (im @ _filter.H).D.U",
"_filter) assert True def test_rec3(): im = ImageRGB.open('src/lenna.jpg') def _f(im,"
] |
[
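Note that the `test_rec*` functions build full two-channel db4 analysis/synthesis chains but end in `assert True`, so they only check that the expressions evaluate without raising. If `ImageRGB` data can be viewed as a numpy array (an assumption about the `ell` API, not shown here), the perfect-reconstruction property could be asserted directly, e.g. at the end of `test_rec`:

# hypothetical tightening of test_rec (assumes np.asarray works on ImageRGB)
assert np.allclose(np.asarray(rec), np.asarray(im), atol=1e-6)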
"resize=True): \"\"\" Creates a default transform to work with torchvision",
"PyTorch import torch from torch.utils.data import IterableDataset, DataLoader from donkeycar.utils",
"config self.tub_paths = tub_paths # Handle the transforms if transform:",
"trainer.fit and trainer.test. Defaults to None. \"\"\" # Loop through",
"= [0.22803, 0.22145, 0.216989] input_size = (112, 112) transform_items =",
"use (minimum size of 1). Each tub path corresponds to",
"[Tub(tub_path, read_only=True) for tub_path in self.tub_paths] self.records: List[TubRecord] = []",
"the dataset, and creates a train/test split. ''' def __init__(self,",
"optional): a transform to apply to the data \"\"\" super().__init__()",
"self.tubs: List[Tub] = [Tub(tub_path, read_only=True) for tub_path in self.tub_paths] self.records:",
"through all the different tubs and load all the records",
"std = [0.22803, 0.22145, 0.216989] input_size = (112, 112) transform_items",
"(predictions + 1) / 2 return predictions def x_transform(record: TubRecord):",
"be loaded in to a range of [0, 1] and",
"records (List[TubRecord]): a list of tub records transform (function, optional):",
"\"\"\" self.config = config # Handle the transforms if transform:",
"__init__(self, config: Any, tub_paths: List[str], transform=None): \"\"\"Create a PyTorch Lightning",
"1] # angle and throttle are originally between [-1, 1]",
"pipeline using the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline",
"def train_dataloader(self): # The number of workers are set to",
"y_transform(record: TubRecord): angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle']",
"the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def __len__(self):",
"std = [0.229, 0.224, 0.225] input_size = (224, 224) if",
"and load all the records for each of them for",
"transform to apply to the data \"\"\" super().__init__() self.config =",
"[0.43216, 0.394666, 0.37645] std = [0.22803, 0.22145, 0.216989] input_size =",
"images normalized in the same way, i.e. mini-batches of 3-channel",
"a transform to apply to the data \"\"\" self.config =",
"a range of [0, 1] and then normalized using mean",
"to None. \"\"\" # Loop through all the different tubs",
"for tub in self.tubs: for underlying in tub: record =",
"else: self.transform = get_default_transform() self.tubs: List[Tub] = [Tub(tub_path, read_only=True) for",
"See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): # The",
"tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records = train_test_split( self.records, test_size=(1. -",
"W), where H and W are expected to be 112,",
"(112, 112) transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if",
"def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates a default transform to",
"validation data. Add more data\" self.train_dataset = TorchTubDataset( self.config, train_records,",
"self.transform = get_default_transform() self.tubs: List[Tub] = [Tub(tub_path, read_only=True) for tub_path",
"can be overridden if more complicated pipelines are required \"\"\"",
"images have to be loaded in to a range of",
"from typing import List, Any from donkeycar.pipeline.types import TubRecord, TubDataset",
"x_transform(record: TubRecord): # Loads the result of Image.open() img_arr =",
"import TubRecord, TubDataset from donkeycar.pipeline.sequence import TubSequence import pytorch_lightning as",
"self.transform = transform else: self.transform = get_default_transform() self.tubs: List[Tub] =",
"the transforms if transform: self.transform = transform else: self.transform =",
"- self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0, \"Not enough validation data.",
"[-1, 1] predictions = (predictions + 1) / 2 return",
"and W are expected to be 112, and T is",
"to use (minimum size of 1). Each tub path corresponds",
"predictions = (predictions + 1) / 2 return predictions def",
"T x H x W), where H and W are",
"get_default_transform() self.tubs: List[Tub] = [Tub(tub_path, read_only=True) for tub_path in self.tub_paths]",
"of [0, 1] and then normalized using mean = [0.43216,",
"loading logic Args: config (object): the configuration information tub_paths (List[str]):",
"on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE,",
"required \"\"\" def y_transform(record: TubRecord): angle: float = record.underlying['user/angle'] throttle:",
"Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self):",
"video frames in a clip. The images have to be",
"underlying in tub: record = TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records,",
"''' def __init__(self, config, records: List[TubRecord], transform=None): \"\"\"Create a PyTorch",
"stage ([string], optional): setup expects a string arg stage. It",
"torch.utils.data import IterableDataset, DataLoader from donkeycar.utils import train_test_split from donkeycar.parts.tub_v2",
"TubDataset from donkeycar.pipeline.sequence import TubSequence import pytorch_lightning as pl def",
"frames in a clip. The images have to be loaded",
"in to a range of [0, 1] and then normalized",
"are set to 0 to avoid errors on Macs and",
"all the records for each of them for tub in",
"transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if resize: transform_items.insert(0,",
"= transform else: self.transform = get_default_transform() self.tubs: List[Tub] = [Tub(tub_path,",
"= (112, 112) transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ]",
"creates a train/test split. ''' def __init__(self, config, records: List[TubRecord],",
"of them for tub in self.tubs: for underlying in tub:",
"in tub: record = TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records",
"0.406] std = [0.229, 0.224, 0.225] input_size = (224, 224)",
"0.394666, 0.37645] and std = [0.22803, 0.22145, 0.216989]. \"\"\" mean",
"enough validation data. Add more data\" self.train_dataset = TorchTubDataset( self.config,",
"to the data \"\"\" super().__init__() self.config = config self.tub_paths =",
"TubRecord): angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] predictions",
"List, Any from donkeycar.pipeline.types import TubRecord, TubDataset from donkeycar.pipeline.sequence import",
"predictions def x_transform(record: TubRecord): # Loads the result of Image.open()",
"set up the datasets. Args: stage ([string], optional): setup expects",
"List[TubRecord] = [] def setup(self, stage=None): \"\"\"Load all the tub",
"config (object): the configuration information tub_paths (List[str]): a list of",
"# The number of workers are set to 0 to",
"record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle], dtype=torch.float)",
"of Image.open() img_arr = record.image(cached=True, as_nparray=False) return self.transform(img_arr) # Build",
"# Loads the result of Image.open() img_arr = record.image(cached=True, as_nparray=False)",
"(object): the configuration information tub_paths (List[str]): a list of paths",
"transform=None): \"\"\"Create a PyTorch Tub Dataset Args: config (object): the",
"([string], optional): setup expects a string arg stage. It is",
"in a clip. The images have to be loaded in",
"and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def",
"throttle are originally between [-1, 1] predictions = (predictions +",
"Args: config (object): the configuration information records (List[TubRecord]): a list",
"self.transform = transform else: self.transform = get_default_transform() self.sequence = TubSequence(records)",
"the tubs to use (minimum size of 1). Each tub",
"and throttle are originally between [-1, 1] predictions = (predictions",
"setup(self, stage=None): \"\"\"Load all the tub data and set up",
"self.tub_paths] self.records: List[TubRecord] = [] def setup(self, stage=None): \"\"\"Load all",
"normalized in the same way, i.e. mini-batches of 3-channel RGB",
"a default transform to work with torchvision models Video transform:",
"float = record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle], dtype=torch.float) # Normalize",
"val_records, transform=self.transform) def train_dataloader(self): # The number of workers are",
"overridden if more complicated pipelines are required \"\"\" def y_transform(record:",
"in self.tub_paths] self.records: List[TubRecord] = [] def setup(self, stage=None): \"\"\"Load",
"transform=self.transform) self.val_dataset = TorchTubDataset( self.config, val_records, transform=self.transform) def train_dataloader(self): #",
"to another training run. transform (function, optional): a transform to",
"between [0, 1] # angle and throttle are originally between",
"iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any, tub_paths: List[str], transform=None):",
"the data \"\"\" self.config = config # Handle the transforms",
"= [0.43216, 0.394666, 0.37645] and std = [0.22803, 0.22145, 0.216989].",
"Creates a default transform to work with torchvision models Video",
"= tub_paths # Handle the transforms if transform: self.transform =",
"1) / 2 return predictions def x_transform(record: TubRecord): # Loads",
"= TubSequence(records) self.pipeline = self._create_pipeline() self.len = len(records) def _create_pipeline(self):",
"return pipeline def __len__(self): return len(self.sequence) def __iter__(self): return iter(self.pipeline)",
"on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE,",
"Each tub path corresponds to another training run. transform (function,",
"T is a number of video frames in a clip.",
"[0.22803, 0.22145, 0.216989] input_size = (112, 112) transform_items = [",
"as pl def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates a default",
"torchvision models Video transform: All pre-trained models expect input images",
"[0.229, 0.224, 0.225] input_size = (224, 224) if for_video: mean",
"list of paths to the tubs to use (minimum size",
"self.tubs: for underlying in tub: record = TubRecord(self.config, tub.base_path, underlying=underlying)",
"self.config, val_records, transform=self.transform) def train_dataloader(self): # The number of workers",
"\"\"\" def y_transform(record: TubRecord): angle: float = record.underlying['user/angle'] throttle: float",
"= get_default_transform() self.sequence = TubSequence(records) self.pipeline = self._create_pipeline() self.len =",
"same way, i.e. mini-batches of 3-channel RGB videos of shape",
"> 0, \"Not enough validation data. Add more data\" self.train_dataset",
"to be loaded in to a range of [0, 1]",
"shape (3 x T x H x W), where H",
"Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)",
"(List[TubRecord]): a list of tub records transform (function, optional): a",
"the same way, i.e. mini-batches of 3-channel RGB videos of",
"is a number of video frames in a clip. The",
"(3 x T x H x W), where H and",
"transforms.Normalize(mean=mean, std=std) ] if resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class",
"pl def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates a default transform",
"apply to the data \"\"\" super().__init__() self.config = config self.tub_paths",
"donkeycar.utils import train_test_split from donkeycar.parts.tub_v2 import Tub from torchvision import",
"import train_test_split from donkeycar.parts.tub_v2 import Tub from torchvision import transforms",
"= self._create_pipeline() self.len = len(records) def _create_pipeline(self): \"\"\" This can",
"def y_transform(record: TubRecord): angle: float = record.underlying['user/angle'] throttle: float =",
"way, i.e. mini-batches of 3-channel RGB videos of shape (3",
"= TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset = TorchTubDataset( self.config, val_records,",
"from donkeycar.utils import train_test_split from donkeycar.parts.tub_v2 import Tub from torchvision",
"expect input images normalized in the same way, i.e. mini-batches",
"= record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle], dtype=torch.float) # Normalize to",
"0, \"Not enough validation data. Add more data\" self.train_dataset =",
"config (object): the configuration information records (List[TubRecord]): a list of",
"transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items)",
"clip. The images have to be loaded in to a",
"DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): # The number of workers",
"# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): #",
"self.sequence = TubSequence(records) self.pipeline = self._create_pipeline() self.len = len(records) def",
"Loop through all the different tubs and load all the",
"TubSequence(records) self.pipeline = self._create_pipeline() self.len = len(records) def _create_pipeline(self): \"\"\"",
"the configuration information tub_paths (List[str]): a list of paths to",
"record = TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records = train_test_split(",
"different tubs and load all the records for each of",
"train_records, transform=self.transform) self.val_dataset = TorchTubDataset( self.config, val_records, transform=self.transform) def train_dataloader(self):",
"import torch from torch.utils.data import IterableDataset, DataLoader from donkeycar.utils import",
"to be 112, and T is a number of video",
"0.224, 0.225] input_size = (224, 224) if for_video: mean =",
"pipelines are required \"\"\" def y_transform(record: TubRecord): angle: float =",
"transform: self.transform = transform else: self.transform = get_default_transform() self.tubs: List[Tub]",
"torch from torch.utils.data import IterableDataset, DataLoader from donkeycar.utils import train_test_split",
"self.transform(img_arr) # Build pipeline using the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform,",
"more complicated pipelines are required \"\"\" def y_transform(record: TubRecord): angle:",
"super().__init__() self.config = config self.tub_paths = tub_paths # Handle the",
"resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads the",
"import IterableDataset, DataLoader from donkeycar.utils import train_test_split from donkeycar.parts.tub_v2 import",
"import pytorch_lightning as pl def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates",
"donkeycar.parts.tub_v2 import Tub from torchvision import transforms from typing import",
"train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0, \"Not",
"= [0.22803, 0.22145, 0.216989]. \"\"\" mean = [0.485, 0.456, 0.406]",
"assert len(val_records) > 0, \"Not enough validation data. Add more",
"[0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] input_size =",
"The number of workers are set to 0 to avoid",
"PyTorch Lightning Data Module to contain all data loading logic",
"for underlying in tub: record = TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record)",
"self.pipeline = self._create_pipeline() self.len = len(records) def _create_pipeline(self): \"\"\" This",
"Normalize to be between [0, 1] # angle and throttle",
"= config # Handle the transforms if transform: self.transform =",
"3-channel RGB videos of shape (3 x T x H",
"val_records = train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records) >",
"TubRecord, TubDataset from donkeycar.pipeline.sequence import TubSequence import pytorch_lightning as pl",
"= [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if resize: transform_items.insert(0, transforms.Resize(input_size))",
"and creates a train/test split. ''' def __init__(self, config, records:",
"x W), where H and W are expected to be",
"a transform to apply to the data \"\"\" super().__init__() self.config",
"separate setup logic for trainer.fit and trainer.test. Defaults to None.",
"train_records, val_records = train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records)",
"https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): # The number",
"and std = [0.22803, 0.22145, 0.216989]. \"\"\" mean = [0.485,",
"self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0, \"Not enough validation data. Add",
"Video transform: All pre-trained models expect input images normalized in",
"(object): the configuration information records (List[TubRecord]): a list of tub",
"TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records = train_test_split( self.records, test_size=(1.",
"dtype=torch.float) # Normalize to be between [0, 1] # angle",
"mean = [0.43216, 0.394666, 0.37645] std = [0.22803, 0.22145, 0.216989]",
"mini-batches of 3-channel RGB videos of shape (3 x T",
"/ 2 return predictions def x_transform(record: TubRecord): # Loads the",
"This can be overridden if more complicated pipelines are required",
"of workers are set to 0 to avoid errors on",
"else: self.transform = get_default_transform() self.sequence = TubSequence(records) self.pipeline = self._create_pipeline()",
"self.transform = get_default_transform() self.sequence = TubSequence(records) self.pipeline = self._create_pipeline() self.len",
"Handle the transforms if transform: self.transform = transform else: self.transform",
"import TubSequence import pytorch_lightning as pl def get_default_transform(for_video=False, for_inference=False, resize=True):",
"throttle], dtype=torch.float) # Normalize to be between [0, 1] #",
"between [-1, 1] predictions = (predictions + 1) / 2",
"with torchvision models Video transform: All pre-trained models expect input",
"stage=None): \"\"\"Load all the tub data and set up the",
"result of Image.open() img_arr = record.image(cached=True, as_nparray=False) return self.transform(img_arr) #",
"= record.image(cached=True, as_nparray=False) return self.transform(img_arr) # Build pipeline using the",
"in the same way, i.e. mini-batches of 3-channel RGB videos",
"(224, 224) if for_video: mean = [0.43216, 0.394666, 0.37645] std",
"if transform: self.transform = transform else: self.transform = get_default_transform() self.sequence",
"\"\"\" mean = [0.485, 0.456, 0.406] std = [0.229, 0.224,",
"torchvision import transforms from typing import List, Any from donkeycar.pipeline.types",
"information tub_paths (List[str]): a list of paths to the tubs",
"for tub_path in self.tub_paths] self.records: List[TubRecord] = [] def setup(self,",
"__init__(self, config, records: List[TubRecord], transform=None): \"\"\"Create a PyTorch Tub Dataset",
"to be between [0, 1] # angle and throttle are",
"tub: record = TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records =",
"trainer.test. Defaults to None. \"\"\" # Loop through all the",
"be 112, and T is a number of video frames",
"''' Loads the dataset, and creates a train/test split. '''",
"data and set up the datasets. Args: stage ([string], optional):",
"Module to contain all data loading logic Args: config (object):",
"a string arg stage. It is used to separate setup",
"std = [0.22803, 0.22145, 0.216989]. \"\"\" mean = [0.485, 0.456,",
"def __init__(self, config: Any, tub_paths: List[str], transform=None): \"\"\"Create a PyTorch",
"arg stage. It is used to separate setup logic for",
"size of 1). Each tub path corresponds to another training",
"configuration information tub_paths (List[str]): a list of paths to the",
"setup logic for trainer.fit and trainer.test. Defaults to None. \"\"\"",
"Any from donkeycar.pipeline.types import TubRecord, TubDataset from donkeycar.pipeline.sequence import TubSequence",
"transform to work with torchvision models Video transform: All pre-trained",
"dataset, and creates a train/test split. ''' def __init__(self, config,",
"import Tub from torchvision import transforms from typing import List,",
"a PyTorch Tub Dataset Args: config (object): the configuration information",
"where H and W are expected to be 112, and",
"def __len__(self): return len(self.sequence) def __iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule):",
"stage. It is used to separate setup logic for trainer.fit",
"split. ''' def __init__(self, config, records: List[TubRecord], transform=None): \"\"\"Create a",
"to avoid errors on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534",
"to work with torchvision models Video transform: All pre-trained models",
"be between [0, 1] # angle and throttle are originally",
"TubSequence import pytorch_lightning as pl def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\"",
"tub in self.tubs: for underlying in tub: record = TubRecord(self.config,",
"transform: self.transform = transform else: self.transform = get_default_transform() self.sequence =",
"self.records: List[TubRecord] = [] def setup(self, stage=None): \"\"\"Load all the",
"if for_video: mean = [0.43216, 0.394666, 0.37645] std = [0.22803,",
"\"\"\"Create a PyTorch Lightning Data Module to contain all data",
"__iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any, tub_paths:",
"tubs to use (minimum size of 1). Each tub path",
"= (224, 224) if for_video: mean = [0.43216, 0.394666, 0.37645]",
"transforms if transform: self.transform = transform else: self.transform = get_default_transform()",
"errors on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.val_dataset,",
"pipeline def __len__(self): return len(self.sequence) def __iter__(self): return iter(self.pipeline) class",
"Tub from torchvision import transforms from typing import List, Any",
"= [0.43216, 0.394666, 0.37645] std = [0.22803, 0.22145, 0.216989] input_size",
"0.37645] and std = [0.22803, 0.22145, 0.216989]. \"\"\" mean =",
"= config self.tub_paths = tub_paths # Handle the transforms if",
"1] and then normalized using mean = [0.43216, 0.394666, 0.37645]",
"errors on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset,",
"datasets. Args: stage ([string], optional): setup expects a string arg",
"len(records) def _create_pipeline(self): \"\"\" This can be overridden if more",
"config: Any, tub_paths: List[str], transform=None): \"\"\"Create a PyTorch Lightning Data",
"to a range of [0, 1] and then normalized using",
"optional): setup expects a string arg stage. It is used",
"run. transform (function, optional): a transform to apply to the",
"are required \"\"\" def y_transform(record: TubRecord): angle: float = record.underlying['user/angle']",
"# Build pipeline using the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform)",
"and trainer.test. Defaults to None. \"\"\" # Loop through all",
"the different tubs and load all the records for each",
"if more complicated pipelines are required \"\"\" def y_transform(record: TubRecord):",
"up the datasets. Args: stage ([string], optional): setup expects a",
"std=std) ] if resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset):",
"\"\"\"Create a PyTorch Tub Dataset Args: config (object): the configuration",
"self.records.append(record) train_records, val_records = train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert",
"transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads the dataset, and",
"records transform (function, optional): a transform to apply to the",
"of 3-channel RGB videos of shape (3 x T x",
"TubRecord): # Loads the result of Image.open() img_arr = record.image(cached=True,",
"TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any, tub_paths: List[str], transform=None): \"\"\"Create a",
"originally between [-1, 1] predictions = (predictions + 1) /",
"a list of tub records transform (function, optional): a transform",
"record.image(cached=True, as_nparray=False) return self.transform(img_arr) # Build pipeline using the transformations",
"(minimum size of 1). Each tub path corresponds to another",
"transform (function, optional): a transform to apply to the data",
"class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any, tub_paths: List[str], transform=None): \"\"\"Create",
"Loads the result of Image.open() img_arr = record.image(cached=True, as_nparray=False) return",
"each of them for tub in self.tubs: for underlying in",
"0.216989]. \"\"\" mean = [0.485, 0.456, 0.406] std = [0.229,",
"y_transform=y_transform) return pipeline def __len__(self): return len(self.sequence) def __iter__(self): return",
"to 0 to avoid errors on Macs and Windows #",
"in self.tubs: for underlying in tub: record = TubRecord(self.config, tub.base_path,",
"= get_default_transform() self.tubs: List[Tub] = [Tub(tub_path, read_only=True) for tub_path in",
"be overridden if more complicated pipelines are required \"\"\" def",
"load all the records for each of them for tub",
"return len(self.sequence) def __iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self,",
"number of workers are set to 0 to avoid errors",
"set to 0 to avoid errors on Macs and Windows",
"0.216989] input_size = (112, 112) transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean,",
"def val_dataloader(self): # The number of workers are set to",
"\"\"\" This can be overridden if more complicated pipelines are",
"the records for each of them for tub in self.tubs:",
"transform: All pre-trained models expect input images normalized in the",
"batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): # The number of workers are",
"TorchTubDataset( self.config, val_records, transform=self.transform) def train_dataloader(self): # The number of",
"] if resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): '''",
"the data \"\"\" super().__init__() self.config = config self.tub_paths = tub_paths",
"__len__(self): return len(self.sequence) def __iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def",
"optional): a transform to apply to the data \"\"\" self.config",
"models Video transform: All pre-trained models expect input images normalized",
"# Loop through all the different tubs and load all",
"another training run. transform (function, optional): a transform to apply",
"config, records: List[TubRecord], transform=None): \"\"\"Create a PyTorch Tub Dataset Args:",
"data \"\"\" super().__init__() self.config = config self.tub_paths = tub_paths #",
"transform else: self.transform = get_default_transform() self.tubs: List[Tub] = [Tub(tub_path, read_only=True)",
"+ 1) / 2 return predictions def x_transform(record: TubRecord): #",
"0.456, 0.406] std = [0.229, 0.224, 0.225] input_size = (224,",
"self.val_dataset = TorchTubDataset( self.config, val_records, transform=self.transform) def train_dataloader(self): # The",
"[0.22803, 0.22145, 0.216989]. \"\"\" mean = [0.485, 0.456, 0.406] std",
"of tub records transform (function, optional): a transform to apply",
"def x_transform(record: TubRecord): # Loads the result of Image.open() img_arr",
"string arg stage. It is used to separate setup logic",
"corresponds to another training run. transform (function, optional): a transform",
"a PyTorch Lightning Data Module to contain all data loading",
"list of tub records transform (function, optional): a transform to",
"and set up the datasets. Args: stage ([string], optional): setup",
"paths to the tubs to use (minimum size of 1).",
"[0, 1] # angle and throttle are originally between [-1,",
"\"\"\" # Loop through all the different tubs and load",
"for_video: mean = [0.43216, 0.394666, 0.37645] std = [0.22803, 0.22145,",
"IterableDataset, DataLoader from donkeycar.utils import train_test_split from donkeycar.parts.tub_v2 import Tub",
"Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)",
"information records (List[TubRecord]): a list of tub records transform (function,",
"return predictions def x_transform(record: TubRecord): # Loads the result of",
"# angle and throttle are originally between [-1, 1] predictions",
"1). Each tub path corresponds to another training run. transform",
"pre-trained models expect input images normalized in the same way,",
"transform else: self.transform = get_default_transform() self.sequence = TubSequence(records) self.pipeline =",
"= torch.tensor([angle, throttle], dtype=torch.float) # Normalize to be between [0,",
"predictions = torch.tensor([angle, throttle], dtype=torch.float) # Normalize to be between",
"them for tub in self.tubs: for underlying in tub: record",
"# Normalize to be between [0, 1] # angle and",
"range of [0, 1] and then normalized using mean =",
"TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset = TorchTubDataset( self.config, val_records, transform=self.transform)",
"data. Add more data\" self.train_dataset = TorchTubDataset( self.config, train_records, transform=self.transform)",
"are expected to be 112, and T is a number",
"test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0, \"Not enough validation",
"using mean = [0.43216, 0.394666, 0.37645] and std = [0.22803,",
"apply to the data \"\"\" self.config = config # Handle",
"List[Tub] = [Tub(tub_path, read_only=True) for tub_path in self.tub_paths] self.records: List[TubRecord]",
"of video frames in a clip. The images have to",
"transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads the dataset, and creates a",
"throttle: float = record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle], dtype=torch.float) #",
"transform=self.transform) def train_dataloader(self): # The number of workers are set",
"Dataset Args: config (object): the configuration information records (List[TubRecord]): a",
"= TorchTubDataset( self.config, val_records, transform=self.transform) def train_dataloader(self): # The number",
"All pre-trained models expect input images normalized in the same",
"\"\"\" Creates a default transform to work with torchvision models",
"1] predictions = (predictions + 1) / 2 return predictions",
"RGB videos of shape (3 x T x H x",
"to apply to the data \"\"\" self.config = config #",
"data\" self.train_dataset = TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset = TorchTubDataset(",
"= record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle],",
"donkeycar.pipeline.sequence import TubSequence import pytorch_lightning as pl def get_default_transform(for_video=False, for_inference=False,",
"self.config = config self.tub_paths = tub_paths # Handle the transforms",
"transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def __len__(self): return",
"used to separate setup logic for trainer.fit and trainer.test. Defaults",
"0.22145, 0.216989]. \"\"\" mean = [0.485, 0.456, 0.406] std =",
"mean = [0.43216, 0.394666, 0.37645] and std = [0.22803, 0.22145,",
"self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def __len__(self): return len(self.sequence) def __iter__(self):",
"= train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0,",
"112, and T is a number of video frames in",
"train/test split. ''' def __init__(self, config, records: List[TubRecord], transform=None): \"\"\"Create",
"pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def __len__(self): return len(self.sequence)",
"the result of Image.open() img_arr = record.image(cached=True, as_nparray=False) return self.transform(img_arr)",
"tub_paths (List[str]): a list of paths to the tubs to",
"0.394666, 0.37645] std = [0.22803, 0.22145, 0.216989] input_size = (112,",
"transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads the dataset,",
"tub path corresponds to another training run. transform (function, optional):",
"return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads the dataset, and creates",
"= [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] input_size",
"a list of paths to the tubs to use (minimum",
"H and W are expected to be 112, and T",
"Args: stage ([string], optional): setup expects a string arg stage.",
"training run. transform (function, optional): a transform to apply to",
"len(val_records) > 0, \"Not enough validation data. Add more data\"",
"input_size = (224, 224) if for_video: mean = [0.43216, 0.394666,",
"is used to separate setup logic for trainer.fit and trainer.test.",
"setup expects a string arg stage. It is used to",
"tub_paths: List[str], transform=None): \"\"\"Create a PyTorch Lightning Data Module to",
"from donkeycar.pipeline.types import TubRecord, TubDataset from donkeycar.pipeline.sequence import TubSequence import",
"# Handle the transforms if transform: self.transform = transform else:",
"DataLoader from donkeycar.utils import train_test_split from donkeycar.parts.tub_v2 import Tub from",
"def setup(self, stage=None): \"\"\"Load all the tub data and set",
"tubs and load all the records for each of them",
"= len(records) def _create_pipeline(self): \"\"\" This can be overridden if",
"self.len = len(records) def _create_pipeline(self): \"\"\" This can be overridden",
"TorchTubDataset(IterableDataset): ''' Loads the dataset, and creates a train/test split.",
"of 1). Each tub path corresponds to another training run.",
"normalized using mean = [0.43216, 0.394666, 0.37645] and std =",
"2 return predictions def x_transform(record: TubRecord): # Loads the result",
"avoid errors on Macs and Windows # See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534 return",
"def _create_pipeline(self): \"\"\" This can be overridden if more complicated",
"Data Module to contain all data loading logic Args: config",
"the tub data and set up the datasets. Args: stage",
"self._create_pipeline() self.len = len(records) def _create_pipeline(self): \"\"\" This can be",
"\"\"\"Load all the tub data and set up the datasets.",
"if resize: transform_items.insert(0, transforms.Resize(input_size)) return transforms.Compose(transform_items) class TorchTubDataset(IterableDataset): ''' Loads",
"return self.transform(img_arr) # Build pipeline using the transformations pipeline =",
"record.underlying['user/throttle'] predictions = torch.tensor([angle, throttle], dtype=torch.float) # Normalize to be",
"have to be loaded in to a range of [0,",
"config # Handle the transforms if transform: self.transform = transform",
"train_dataloader(self): # The number of workers are set to 0",
"0.225] input_size = (224, 224) if for_video: mean = [0.43216,",
"a train/test split. ''' def __init__(self, config, records: List[TubRecord], transform=None):",
"records: List[TubRecord], transform=None): \"\"\"Create a PyTorch Tub Dataset Args: config",
"import transforms from typing import List, Any from donkeycar.pipeline.types import",
"donkeycar.pipeline.types import TubRecord, TubDataset from donkeycar.pipeline.sequence import TubSequence import pytorch_lightning",
"for_inference=False, resize=True): \"\"\" Creates a default transform to work with",
"of paths to the tubs to use (minimum size of",
"transform=None): \"\"\"Create a PyTorch Lightning Data Module to contain all",
"\"\"\" super().__init__() self.config = config self.tub_paths = tub_paths # Handle",
"= [0.229, 0.224, 0.225] input_size = (224, 224) if for_video:",
"= [] def setup(self, stage=None): \"\"\"Load all the tub data",
"of shape (3 x T x H x W), where",
"more data\" self.train_dataset = TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset =",
"_create_pipeline(self): \"\"\" This can be overridden if more complicated pipelines",
"0 to avoid errors on Macs and Windows # See:",
"get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates a default transform to work",
"a clip. The images have to be loaded in to",
"PyTorch Tub Dataset Args: config (object): the configuration information records",
"models expect input images normalized in the same way, i.e.",
"[] def setup(self, stage=None): \"\"\"Load all the tub data and",
"underlying=underlying) self.records.append(record) train_records, val_records = train_test_split( self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))",
"\"Not enough validation data. Add more data\" self.train_dataset = TorchTubDataset(",
"the configuration information records (List[TubRecord]): a list of tub records",
"logic for trainer.fit and trainer.test. Defaults to None. \"\"\" #",
"Defaults to None. \"\"\" # Loop through all the different",
"0.22145, 0.216989] input_size = (112, 112) transform_items = [ transforms.ToTensor(),",
"W are expected to be 112, and T is a",
"and T is a number of video frames in a",
"torch.tensor([angle, throttle], dtype=torch.float) # Normalize to be between [0, 1]",
"Image.open() img_arr = record.image(cached=True, as_nparray=False) return self.transform(img_arr) # Build pipeline",
"data loading logic Args: config (object): the configuration information tub_paths",
"= (predictions + 1) / 2 return predictions def x_transform(record:",
"None. \"\"\" # Loop through all the different tubs and",
"if transform: self.transform = transform else: self.transform = get_default_transform() self.tubs:",
"def __init__(self, config, records: List[TubRecord], transform=None): \"\"\"Create a PyTorch Tub",
"for trainer.fit and trainer.test. Defaults to None. \"\"\" # Loop",
"to the data \"\"\" self.config = config # Handle the",
"Loads the dataset, and creates a train/test split. ''' def",
"using the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def",
"as_nparray=False) return self.transform(img_arr) # Build pipeline using the transformations pipeline",
"self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT)) assert len(val_records) > 0, \"Not enough",
"path corresponds to another training run. transform (function, optional): a",
"The images have to be loaded in to a range",
"a number of video frames in a clip. The images",
"[0.43216, 0.394666, 0.37645] and std = [0.22803, 0.22145, 0.216989]. \"\"\"",
"H x W), where H and W are expected to",
"logic Args: config (object): the configuration information tub_paths (List[str]): a",
"= transform else: self.transform = get_default_transform() self.sequence = TubSequence(records) self.pipeline",
"from torchvision import transforms from typing import List, Any from",
"transform to apply to the data \"\"\" self.config = config",
"contain all data loading logic Args: config (object): the configuration",
"224) if for_video: mean = [0.43216, 0.394666, 0.37645] std =",
"x H x W), where H and W are expected",
"Args: config (object): the configuration information tub_paths (List[str]): a list",
"to the tubs to use (minimum size of 1). Each",
"input_size = (112, 112) transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)",
"return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any, tub_paths: List[str],",
"all data loading logic Args: config (object): the configuration information",
"(List[str]): a list of paths to the tubs to use",
"records for each of them for tub in self.tubs: for",
"[ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if resize: transform_items.insert(0, transforms.Resize(input_size)) return",
"(function, optional): a transform to apply to the data \"\"\"",
"Tub Dataset Args: config (object): the configuration information records (List[TubRecord]):",
"# PyTorch import torch from torch.utils.data import IterableDataset, DataLoader from",
"then normalized using mean = [0.43216, 0.394666, 0.37645] and std",
"self.tub_paths = tub_paths # Handle the transforms if transform: self.transform",
"= self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return pipeline def __len__(self): return len(self.sequence) def",
"from donkeycar.pipeline.sequence import TubSequence import pytorch_lightning as pl def get_default_transform(for_video=False,",
"all the tub data and set up the datasets. Args:",
"all the different tubs and load all the records for",
"len(self.sequence) def __iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config:",
"return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0) def val_dataloader(self): # The number of",
"def __iter__(self): return iter(self.pipeline) class TorchTubDataModule(pl.LightningDataModule): def __init__(self, config: Any,",
"work with torchvision models Video transform: All pre-trained models expect",
"Add more data\" self.train_dataset = TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset",
"112) transform_items = [ transforms.ToTensor(), transforms.Normalize(mean=mean, std=std) ] if resize:",
"tub_path in self.tub_paths] self.records: List[TubRecord] = [] def setup(self, stage=None):",
"It is used to separate setup logic for trainer.fit and",
"default transform to work with torchvision models Video transform: All",
"videos of shape (3 x T x H x W),",
"Any, tub_paths: List[str], transform=None): \"\"\"Create a PyTorch Lightning Data Module",
"= TubRecord(self.config, tub.base_path, underlying=underlying) self.records.append(record) train_records, val_records = train_test_split( self.records,",
"0.37645] std = [0.22803, 0.22145, 0.216989] input_size = (112, 112)",
"loaded in to a range of [0, 1] and then",
"Lightning Data Module to contain all data loading logic Args:",
"are originally between [-1, 1] predictions = (predictions + 1)",
"for each of them for tub in self.tubs: for underlying",
"pytorch_lightning as pl def get_default_transform(for_video=False, for_inference=False, resize=True): \"\"\" Creates a",
"to apply to the data \"\"\" super().__init__() self.config = config",
"val_dataloader(self): # The number of workers are set to 0",
"List[str], transform=None): \"\"\"Create a PyTorch Lightning Data Module to contain",
"import List, Any from donkeycar.pipeline.types import TubRecord, TubDataset from donkeycar.pipeline.sequence",
"self.train_dataset = TorchTubDataset( self.config, train_records, transform=self.transform) self.val_dataset = TorchTubDataset( self.config,",
"train_test_split from donkeycar.parts.tub_v2 import Tub from torchvision import transforms from",
"read_only=True) for tub_path in self.tub_paths] self.records: List[TubRecord] = [] def",
"List[TubRecord], transform=None): \"\"\"Create a PyTorch Tub Dataset Args: config (object):",
"data \"\"\" self.config = config # Handle the transforms if",
"to contain all data loading logic Args: config (object): the",
"expects a string arg stage. It is used to separate",
"num_workers=0) def val_dataloader(self): # The number of workers are set",
"input images normalized in the same way, i.e. mini-batches of",
"angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] predictions =",
"tub data and set up the datasets. Args: stage ([string],",
"Build pipeline using the transformations pipeline = self.sequence.build_pipeline(x_transform=x_transform, y_transform=y_transform) return",
"self.config = config # Handle the transforms if transform: self.transform",
"from donkeycar.parts.tub_v2 import Tub from torchvision import transforms from typing",
"tub_paths # Handle the transforms if transform: self.transform = transform",
"transforms from typing import List, Any from donkeycar.pipeline.types import TubRecord,",
"= [Tub(tub_path, read_only=True) for tub_path in self.tub_paths] self.records: List[TubRecord] =",
"x T x H x W), where H and W",
"configuration information records (List[TubRecord]): a list of tub records transform",
"complicated pipelines are required \"\"\" def y_transform(record: TubRecord): angle: float",
"float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] predictions = torch.tensor([angle,",
"i.e. mini-batches of 3-channel RGB videos of shape (3 x",
"mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225]",
"from torch.utils.data import IterableDataset, DataLoader from donkeycar.utils import train_test_split from",
"[0, 1] and then normalized using mean = [0.43216, 0.394666,",
"class TorchTubDataset(IterableDataset): ''' Loads the dataset, and creates a train/test",
"and then normalized using mean = [0.43216, 0.394666, 0.37645] and",
"img_arr = record.image(cached=True, as_nparray=False) return self.transform(img_arr) # Build pipeline using",
"tub records transform (function, optional): a transform to apply to",
"workers are set to 0 to avoid errors on Macs",
"number of video frames in a clip. The images have",
"expected to be 112, and T is a number of",
"the datasets. Args: stage ([string], optional): setup expects a string",
"to separate setup logic for trainer.fit and trainer.test. Defaults to",
"self.config, train_records, transform=self.transform) self.val_dataset = TorchTubDataset( self.config, val_records, transform=self.transform) def",
"typing import List, Any from donkeycar.pipeline.types import TubRecord, TubDataset from",
"angle and throttle are originally between [-1, 1] predictions =",
"get_default_transform() self.sequence = TubSequence(records) self.pipeline = self._create_pipeline() self.len = len(records)"
] |
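TorchTubDataModule reads only two attributes from its config object, TRAIN_TEST_SPLIT and BATCH_SIZE, so a plain namespace works as a stand-in when sketching how the module plugs into a Lightning trainer. The sketch below is illustrative: the tub path and the LightningModule are placeholders, not part of the listing above. Note that setup() asserts a non-empty validation split, so TRAIN_TEST_SPLIT must stay below 1.0 for fit to proceed.

from types import SimpleNamespace
import pytorch_lightning as pl

# Stand-in config exposing only the attributes the data module reads.
cfg = SimpleNamespace(TRAIN_TEST_SPLIT=0.8, BATCH_SIZE=128)

# Hypothetical tub path; each path corresponds to one recorded training run.
data_module = TorchTubDataModule(cfg, ['data/tub_1'])

# model = MyDrivingModule()                    # any pl.LightningModule
# trainer = pl.Trainer(max_epochs=10)
# trainer.fit(model, datamodule=data_module)   # calls setup() and the loaders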
import os
from tornado.template import Template

__SNIPPET__ = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_snippet')


def T(name, **kw):
    t = Template(open(os.path.join(__SNIPPET__, name + '.html'), 'rb').read())
    return t.generate(**dict([('template_file', name)] + globals().items() + kw.items()))
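One caveat with T(): the expression dict([('template_file', name)] + globals().items() + kw.items()) relies on Python 2, where dict.items() returns a list; under Python 3 it raises a TypeError because items() returns a view. A Python 3-compatible variant with the same lookup convention and the same key precedence might look like the sketch below (reusing os, Template, and __SNIPPET__ from the listing above); the 'footer' snippet name in the usage line is hypothetical.

def T3(name, **kw):
    # Same convention as T(): templates live under _snippet/<name>.html.
    with open(os.path.join(__SNIPPET__, name + '.html'), 'rb') as f:
        t = Template(f.read())
    # Preserve the original precedence: module globals override the default
    # template_file entry, and explicit keyword arguments override both.
    namespace = {'template_file': name}
    namespace.update(globals())
    namespace.update(kw)
    return t.generate(**namespace)

# html = T3('footer', year=2016)  # renders _snippet/footer.html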
"""
Support for getting the disk temperature of a host.

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet

import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
    CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity

_LOGGER = logging.getLogger(__name__)

ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'

DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5

SCAN_INTERVAL = timedelta(minutes=1)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})


def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the HDDTemp sensor."""
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    disks = config.get(CONF_DISKS)

    hddtemp = HddTempData(host, port)
    hddtemp.update()

    if hddtemp.data is None:
        return False

    if not disks:
        disks = [next(iter(hddtemp.data)).split('|')[0]]

    dev = []
    for disk in disks:
        if disk in hddtemp.data:
            dev.append(HddTempSensor(name, disk, hddtemp))

    add_devices(dev, True)


class HddTempSensor(Entity):
    """Representation of a HDDTemp sensor."""

    def __init__(self, name, disk, hddtemp):
        """Initialize a HDDTemp sensor."""
        self.hddtemp = hddtemp
        self.disk = disk
        self._name = '{} {}'.format(name, disk)
        self._state = None
        self._details = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        if self._details[3] == 'C':
            return TEMP_CELSIUS
        return TEMP_FAHRENHEIT

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_DEVICE: self._details[0],
            ATTR_MODEL: self._details[1],
        }

    def update(self):
        """Get the latest data from HDDTemp daemon and updates the state."""
        self.hddtemp.update()

        if self.hddtemp.data and self.disk in self.hddtemp.data:
            self._details = self.hddtemp.data[self.disk].split('|')
            self._state = self._details[2]
        else:
            self._state = None


class HddTempData(object):
    """Get the latest data from HDDTemp and update the states."""

    def __init__(self, host, port):
        """Initialize the data object."""
        self.host = host
        self.port = port
        self.data = None

    def update(self):
        """Get the latest data from HDDTemp running as daemon."""
        try:
            connection = Telnet(
                host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
            data = connection.read_all().decode(
                'ascii').lstrip('|').rstrip('|').split('||')
            self.data = {data[i].split('|')[0]: data[i]
                         for i in range(0, len(data), 1)}
        except ConnectionRefusedError:
            _LOGGER.error(
                "HDDTemp is not available at %s:%s", self.host, self.port)
            self.data = None
"class HddTempData(object): \"\"\"Get the latest data from HDDTemp and update",
"at https://home-assistant.io/components/sensor.hddtemp/ \"\"\" import logging from datetime import timedelta from",
"PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,",
"For more details about this platform, please refer to the",
"data[i] for i in range(0, len(data), 1)} except ConnectionRefusedError: _LOGGER.error(",
"vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]), vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,",
"the sensor.\"\"\" return { ATTR_DEVICE: self._details[0], ATTR_MODEL: self._details[1], } def",
"import Entity _LOGGER = logging.getLogger(__name__) ATTR_DEVICE = 'device' ATTR_MODEL =",
"DEFAULT_TIMEOUT = 5 SCAN_INTERVAL = timedelta(minutes=1) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_DISKS,",
"if hddtemp.data is None: return False if not disks: disks",
"is expressed in.\"\"\" if self._details[3] == 'C': return TEMP_CELSIUS return",
"disk temperature of a host. For more details about this",
"import PLATFORM_SCHEMA from homeassistant.const import ( CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS,",
"def state(self): \"\"\"Return the state of the device.\"\"\" return self._state",
"port=self.port, timeout=DEFAULT_TIMEOUT) data = connection.read_all().decode( 'ascii').lstrip('|').rstrip('|').split('||') self.data = {data[i].split('|')[0]: data[i]",
"= {data[i].split('|')[0]: data[i] for i in range(0, len(data), 1)} except",
"ATTR_DEVICE = 'device' ATTR_MODEL = 'model' DEFAULT_HOST = 'localhost' DEFAULT_PORT",
"} def update(self): \"\"\"Get the latest data from HDDTemp daemon",
"of the sensor.\"\"\" return self._name @property def state(self): \"\"\"Return the",
"port) hddtemp.update() if hddtemp.data is None: return False if not",
"timedelta from telnetlib import Telnet import voluptuous as vol import",
"CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS) from homeassistant.helpers.entity import Entity _LOGGER =",
"name(self): \"\"\"Return the name of the sensor.\"\"\" return self._name @property",
"i in range(0, len(data), 1)} except ConnectionRefusedError: _LOGGER.error( \"HDDTemp is",
"TEMP_FAHRENHEIT, CONF_DISKS) from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTR_DEVICE",
"update(self): \"\"\"Get the latest data from HDDTemp running as daemon.\"\"\"",
"self.port = port self.data = None def update(self): \"\"\"Get the",
"DEFAULT_PORT = 7634 DEFAULT_NAME = 'HD Temperature' DEFAULT_TIMEOUT = 5",
"up the HDDTemp sensor.\"\"\" name = config.get(CONF_NAME) host = config.get(CONF_HOST)",
"update the states.\"\"\" def __init__(self, host, port): \"\"\"Initialize the data",
"and updates the state.\"\"\" self.hddtemp.update() if self.hddtemp.data and self.disk in",
"@property def unit_of_measurement(self): \"\"\"Return the unit the value is expressed",
"\"\"\"Get the latest data from HDDTemp running as daemon.\"\"\" try:",
"self._details[3] == 'C': return TEMP_CELSIUS return TEMP_FAHRENHEIT @property def device_state_attributes(self):",
"data object.\"\"\" self.host = host self.port = port self.data =",
"def update(self): \"\"\"Get the latest data from HDDTemp running as",
"= port self.data = None def update(self): \"\"\"Get the latest",
"hddtemp.data: dev.append(HddTempSensor(name, disk, hddtemp)) add_devices(dev, True) class HddTempSensor(Entity): \"\"\"Representation of",
"= config.get(CONF_PORT) disks = config.get(CONF_DISKS) hddtemp = HddTempData(host, port) hddtemp.update()",
"add_devices, discovery_info=None): \"\"\"Set up the HDDTemp sensor.\"\"\" name = config.get(CONF_NAME)",
"import homeassistant.helpers.config_validation as cv from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const",
"= hddtemp self.disk = disk self._name = '{} {}'.format(name, disk)",
"daemon and updates the state.\"\"\" self.hddtemp.update() if self.hddtemp.data and self.disk",
"latest data from HDDTemp daemon and updates the state.\"\"\" self.hddtemp.update()",
"self._details = None @property def name(self): \"\"\"Return the name of",
"and update the states.\"\"\" def __init__(self, host, port): \"\"\"Initialize the",
"datetime import timedelta from telnetlib import Telnet import voluptuous as",
"def device_state_attributes(self): \"\"\"Return the state attributes of the sensor.\"\"\" return",
"TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS) from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__)",
"import logging from datetime import timedelta from telnetlib import Telnet",
"from HDDTemp and update the states.\"\"\" def __init__(self, host, port):",
"TEMP_FAHRENHEIT @property def device_state_attributes(self): \"\"\"Return the state attributes of the",
"setup_platform(hass, config, add_devices, discovery_info=None): \"\"\"Set up the HDDTemp sensor.\"\"\" name",
"self.hddtemp.update() if self.hddtemp.data and self.disk in self.hddtemp.data: self._details = self.hddtemp.data[self.disk].split('|')",
"sensor.\"\"\" name = config.get(CONF_NAME) host = config.get(CONF_HOST) port = config.get(CONF_PORT)",
"= 5 SCAN_INTERVAL = timedelta(minutes=1) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_DISKS, default=[]):",
"def update(self): \"\"\"Get the latest data from HDDTemp daemon and",
"def name(self): \"\"\"Return the name of the sensor.\"\"\" return self._name",
"return self._state @property def unit_of_measurement(self): \"\"\"Return the unit the value",
"self.hddtemp.data[self.disk].split('|') self._state = self._details[2] else: self._state = None class HddTempData(object):",
"\"\"\"Initialize the data object.\"\"\" self.host = host self.port = port",
"logging.getLogger(__name__) ATTR_DEVICE = 'device' ATTR_MODEL = 'model' DEFAULT_HOST = 'localhost'",
"vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, }) def setup_platform(hass, config,",
"disk self._name = '{} {}'.format(name, disk) self._state = None self._details",
"state(self): \"\"\"Return the state of the device.\"\"\" return self._state @property",
"from homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTR_DEVICE = 'device'",
"\"\"\"Representation of a HDDTemp sensor.\"\"\" def __init__(self, name, disk, hddtemp):",
"ATTR_MODEL = 'model' DEFAULT_HOST = 'localhost' DEFAULT_PORT = 7634 DEFAULT_NAME",
"name, disk, hddtemp): \"\"\"Initialize a HDDTemp sensor.\"\"\" self.hddtemp = hddtemp",
"state.\"\"\" self.hddtemp.update() if self.hddtemp.data and self.disk in self.hddtemp.data: self._details =",
"this platform, please refer to the documentation at https://home-assistant.io/components/sensor.hddtemp/ \"\"\"",
"\"\"\"Get the latest data from HDDTemp daemon and updates the",
"in self.hddtemp.data: self._details = self.hddtemp.data[self.disk].split('|') self._state = self._details[2] else: self._state",
"@property def device_state_attributes(self): \"\"\"Return the state attributes of the sensor.\"\"\"",
"homeassistant.helpers.entity import Entity _LOGGER = logging.getLogger(__name__) ATTR_DEVICE = 'device' ATTR_MODEL",
"expressed in.\"\"\" if self._details[3] == 'C': return TEMP_CELSIUS return TEMP_FAHRENHEIT",
"= self._details[2] else: self._state = None class HddTempData(object): \"\"\"Get the",
"HddTempData(object): \"\"\"Get the latest data from HDDTemp and update the",
"is not available at %s:%s\", self.host, self.port) self.data = None",
"= 7634 DEFAULT_NAME = 'HD Temperature' DEFAULT_TIMEOUT = 5 SCAN_INTERVAL"
] |
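# The platform above is driven entirely by Home Assistant's configuration.yaml.
# A minimal sketch of such an entry follows; the disk path is illustrative, and
# the host/port values simply restate DEFAULT_HOST and DEFAULT_PORT from above:
#
#   sensor:
#     - platform: hddtemp
#       host: localhost
#       port: 7634
#       disks:
#         - /dev/sda
#
# If disks is omitted, setup_platform falls back to the first disk reported by
# the daemon, per the next(iter(hddtemp.data)) branch above.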
# JohnnySn0w/BabbleBot
import random

prefix = [
    'Look at you! ',
    'Bless ',
    'Bless! ',
    'I heard about that! ',
    'Amen!',
    'You and the kids doing alright?',
    'Miss ya\'ll!'
]

suffix = [
    '. Amen!',
    '. God bless america',
    '. God bless!',
    ' haha',
    '. love ya!',
    '. love ya\'ll!',
]


def add_pre_suf(sentence):
    if random.randint(1, 10) <= 6:
        if random.randint(1, 10) <= 5:
            sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
        else:
            sentence += suffix[random.randint(0, len(suffix) - 1)]
    return sentence


def add_elipses(sentence):
    words = sentence.split()
    for i in range(4, len(words), 5):
        if random.randint(1, 10) <= 7:
            words[i] += "..."
    return " ".join(words)


def boomer_caps(sentence):
    seed = random.randint(1, 10)
    sent_array = sentence.split()
    if seed in (1, 2, 3):
        return sentence
    elif seed in (4, 5):
        temp_sent = []
        for x in sent_array:
            if random.random() < 0.25:
                x = x.upper()
            temp_sent.append(x)
        return " ".join(temp_sent)
    elif seed in (6, 7):
        temp_sent = []
        for x in sent_array:
            if random.random() < 0.5:
                x = x.upper()
            temp_sent.append(x)
        return " ".join(temp_sent)
    elif seed in (8, 9):
        return sentence.title()
    elif seed == 10:
        return sentence.upper()
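# A minimal usage sketch. The chaining order below is an assumption for
# illustration; it is not part of the original module:
if __name__ == '__main__':
    sentence = 'how are the grandkids doing these days'
    # Each transform is independent, so they compose in any order:
    # randomized capitalization, then ellipses every fifth word, then an
    # optional prefix or suffix.
    print(add_pre_suf(add_elipses(boomer_caps(sentence))))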
import telegram
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt

from braces.views import CsrfExemptMixin
from rest_framework.authentication import BasicAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny

from .bots import TelegramBot
from .models import TelegramUser as User


@method_decorator(csrf_exempt, name='dispatch')
class TelegramBotView(APIView):
    permission_classes = (AllowAny, )

    def post(self, request, *args, **kwargs):
        context = request.data
        bot = TelegramBot(context)
        user, _ = User.objects.get_or_create(
            id=bot.sender['id'],
            defaults={
                'first_name': bot.sender['first_name'],
                'last_name': bot.sender.get('last_name', ''),
                'username': bot.sender.get('username', ''),
                'is_bot': bot.sender.get('is_bot', False)
            }
        )
        user.access_count += 1
        user.save()
        bot.process(user)
        return Response(status=status.HTTP_200_OK)
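# The view above only handles requests once it is routed. A hypothetical
# urls.py entry is sketched below; the module path and URL pattern are
# assumptions, not part of the original file:
#
#   from django.urls import path
#   from .views import TelegramBotView
#
#   urlpatterns = [
#       path('telegram/webhook/', TelegramBotView.as_view()),
#   ]
#
# Telegram would then be pointed at this endpoint via its setWebhook API, so
# every update arrives as the POST body that request.data exposes above.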
import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse

import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()

from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *


def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):
    cgn.eval()
    dev = u_fixed.to(cgn.get_device())
    ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]

    to_save = []
    with torch.no_grad():
        for y in ys:
            # generate
            y_vec = cgn.get_class_vec(y, sz=1)
            inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
            x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)
            x_gen = mask * foreground + (1 - mask) * background

            # build class grid
            to_plot = [premask, foreground, background, x_gen, x_gt]
            grid = make_grid(torch.cat(to_plot).detach().cpu(),
                             nrow=len(to_plot), padding=2, normalize=True)

            # add unnormalized mask
            mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()
            grid = torch.cat([mask, grid], 2)

            # save to disk
            to_save.append(grid)
            del to_plot, mask, premask, foreground, background, x_gen, x_gt

    # save the image
    path = join(sample_path, 'cls_sheet_' + ep_str + '.png')
    torchvision.utils.save_image(torch.cat(to_save, 1), path)
    cgn.train()


def save_sample_single(cgn, u_fixed, sample_path, ep_str):
    cgn.eval()
    dev = u_fixed.to(cgn.get_device())
    ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]

    with torch.no_grad():
        for y in ys:
            # generate
            y_vec = cgn.get_class_vec(y, sz=1)
            inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
            _, mask, premask, foreground, background, _ = cgn(inp)
            x_gen = mask * foreground + (1 - mask) * background

            # save images
            path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')
            torchvision.utils.save_image(premask, path, normalize=True)
            path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png')
            torchvision.utils.save_image(mask, path, normalize=True)
            path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png')
            torchvision.utils.save_image(foreground, path, normalize=True)
            path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')
            torchvision.utils.save_image(background, path, normalize=True)
            path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')
            torchvision.utils.save_image(x_gen, path, normalize=True)
    cgn.train()


def fit(cfg, cgn, opts, losses):
    inception_score_val = list()

    # total number of episodes, accounted for batch accumulation
    episodes = cfg.TRAIN.EPISODES
    episodes *= cfg.TRAIN.BATCH_ACC

    # directories for experiments
    time_str = datetime.now().strftime("%Y_%m_%d_%H_%M")
    if cfg.WEIGHTS_PATH:
        weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent)
        start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])
        sample_path = weights_path.replace('weights', 'samples')
        ep_range = (start_ep, start_ep + episodes)
    else:
        model_path = join('imagenet', 'experiments',
                          f'cgn_{time_str}_{cfg.MODEL_NAME}')
        weights_path = join(model_path, 'weights')
        sample_path = join(model_path, 'samples')
        pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True)
        pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True)
        ep_range = (0, episodes)

    # fixed noise sample
    u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')
    if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:
        u_fixed = cgn.get_noise_vec()
        torch.save(u_fixed, u_fixed_path)
    else:
        u_fixed = torch.load(u_fixed_path)

    # Training Loop
    cgn.train()
    L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses
    save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet

    pbar = tqdm(range(*ep_range))
    for i, ep in enumerate(pbar):
        x_gt, mask, premask, foreground, background, background_mask = cgn()
        x_gen = mask * foreground + (1 - mask) * background

        # Losses
        losses_g = {}
        losses_g['l1'] = L_l1(x_gen, x_gt)
        losses_g['perc'] = L_perc(x_gen, x_gt)
        losses_g['binary'] = L_binary(mask)
        losses_g['mask'] = L_mask(mask)
        # reconstructed call: only the 'perc_text' key is attested here
        losses_g['perc_text'] = L_text(x_gt, mask, foreground)
        losses_g['bg'] = L_bg(background_mask)

        # backprop
        losses_g = {k: v.mean() for k, v in losses_g.items()}
        g_loss = sum(losses_g.values())
        g_loss.backward()

        if (i + 1) % cfg.TRAIN.BATCH_ACC == 0:
            opts.step(['shape', 'bg', 'texture'])

        # Saving
        if not i % cfg.LOG.SAVE_ITER:
            ep_str = f'ep_{ep:07}'
            save_samples(cgn, u_fixed, sample_path, ep_str)
            torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))

        # Logging
        if cfg.LOG.LOSSES:
            msg = ''.join([f"[{k}: {v:.3f}]" for k, v in losses_g.items()])
            pbar.set_description(msg)

        # Calculate Inception Score
        if cfg.LOG.INCEPTION_SCORE:
            score, score_std = inception_score(x_gen)
            inception_score_val.append(score)


def main(cfg):
    # model init
    cgn = CGN(
        batch_sz=cfg.TRAIN.BATCH_SZ,
        truncation=cfg.MODEL.TRUNCATION,
        pretrained=True,
    )
    print("------CGN-------")
    print(cgn)
    if cfg.WEIGHTS_PATH:
        weights = torch.load(cfg.WEIGHTS_PATH)
        weights = {k.replace('module.', ''): v for k, v in weights.items()}
        cgn.load_state_dict(weights)

    # optimizers
    opts = Optimizers()
    opts.set('shape', cgn.f_shape, cfg.LR.SHAPE)
    opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)
    opts.set('bg', cgn.f_bg, cfg.LR.BG)

    # losses
    L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1)
    L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)
    L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)
    L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)
    L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT)
    L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)
    losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)

    # push to device and train
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cgn = cgn.to(device)
    losses = (l.to(device) for l in losses)

    fit(cfg, cgn, opts, losses)


def merge_args_and_cfg(args, cfg):
    cfg.MODEL_NAME = args.model_name
    cfg.WEIGHTS_PATH = args.weights_path

    cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise
    cfg.LOG.SAVE_SINGLES = args.save_singles
    cfg.LOG.SAVE_ITER = args.save_iter
    cfg.LOG.LOSSES = args.log_losses
    cfg.LOG.INCEPTION_SCORE = True

    cfg.TRAIN.EPISODES = args.episodes
    cfg.TRAIN.BATCH_SZ = args.batch_sz
    cfg.TRAIN.BATCH_ACC = args.batch_acc

    cfg.MODEL.TRUNCATION = args.truncation
    return cfg


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='tmp',
                        help='Weights and samples will be saved under experiments/model_name')
    parser.add_argument('--weights_path', default='',
                        help='provide path to continue training')
    parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
                        help='If you want a different noise vector than provided in the repo')
    parser.add_argument('--save_singles', default=False, action='store_true',
                        help='Save single images instead of sheets')
    parser.add_argument('--truncation', type=float, default=1.0,
                        help='Truncation value for noise sampling')
    parser.add_argument('--episodes', type=int, default=300,
                        help="We don't do dataloading, hence, one episode = one gradient update.")
    parser.add_argument('--batch_sz', type=int, default=1,
                        help='Batch size, use in conjunction with batch_acc')
    parser.add_argument('--batch_acc', type=int, default=4000,
                        help='pseudo_batch_size = batch_acc * batch_sz')
    parser.add_argument('--save_iter', type=int, default=4000,
                        help='Save samples/weights every n iter')
    parser.add_argument('--log_losses', default=False, action='store_true',
                        help='Print out losses')
    args = parser.parse_args()

    cfg = get_cfg_defaults()
    cfg = merge_args_and_cfg(args, cfg)

    print(cfg)
    main(cfg)
"gradient update.\") parser.add_argument('--batch_sz', type=int, default=1, help='Batch size, use in conjunciton",
"save_images path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png') torchvision.utils.save_image(premask,",
"inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) x_gt, mask, premask, foreground, background,",
"= [premask, foreground, background, x_gen, x_gt] grid = make_grid(torch.cat(to_plot).detach().cpu(), nrow=len(to_plot),",
"+ ep_str + '.png') torchvision.utils.save_image(premask, path, normalize=True) path = join(sample_path,",
"from shared.losses import * from utils import Optimizers from inception_score",
"background # Losses losses_g = {} losses_g['l1'] = L_l1(x_gen, x_gt)",
"v.mean() for k, v in losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward()",
"train device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cgn =",
"pathlib from tqdm import tqdm import argparse import torch from",
"CGN from imagenet.config import get_cfg_defaults from shared.losses import * from",
"help='pseudo_batch_size = batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000, help='Save samples/weights every",
"and samples will be saved under experiments/model_name') parser.add_argument('--weights_path', default='', help='provide",
"mask) * background # build class grid to_plot = [premask,",
"cgn.eval() dev = u_fixed.to(cgn.get_device()) ys = [15, 251, 330, 382,",
"= L_text(x_gt, mask, foreground) losses_g['bg'] = L_bg(background_mask) # backprop losses_g",
"to_save.append(grid) del to_plot, mask, premask, foreground, background, x_gen, x_gt #",
"947, 999] with torch.no_grad(): for y in ys: # generate",
"= L_bg(background_mask) # backprop losses_g = {k: v.mean() for k,",
"torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.', ''): v for k, v in",
"483, 559, 751, 938, 947, 999] to_save = [] with",
"time_str = datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if cfg.WEIGHTS_PATH: weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep =",
"385, 483, 559, 751, 938, 947, 999] with torch.no_grad(): for",
"not i % cfg.LOG.SAVE_ITER: ep_str = f'ep_{ep:07}' save_samples(cgn, u_fixed, sample_path,",
"PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc, L_binary, L_mask,",
"cfg.TRAIN.BATCH_SZ = args.batch_sz cfg.TRAIN.BATCH_ACC = args.batch_acc cfg.MODEL.TRUNCATION = args.truncation return",
"in weights.items()} cgn.load_state_dict(weights) # optimizers opts = Optimizers() opts.set('shape', cgn.f_shape,",
"foreground + (1 - mask) * background # Losses losses_g",
"L_perc(x_gen, x_gt) losses_g['binary'] = L_binary(mask) losses_g['mask'] = L_mask(mask) losses_g['perc_text'] =",
"y in ys: # generate y_vec = cgn.get_class_vec(y, sz=1) inp",
"'u_fixed.pt') if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec() torch.save(u_fixed,",
"= cgn.to(device) losses = (l.to(device) for l in losses) fit(cfg,",
"help='Weights and samples will be saved under experiments/model_name') parser.add_argument('--weights_path', default='',",
"bg_mask = cgn(inp) x_gen = mask * foreground + (1",
"999] with torch.no_grad(): for y in ys: # generate y_vec",
"premask, foreground, background, bg_mask = cgn(inp) x_gen = mask *",
"default=300, help=\"We don't do dataloading, hence, one episode = one",
"y_vec = cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) _,",
"from imagenet.config import get_cfg_defaults from shared.losses import * from utils",
"999] to_save = [] with torch.no_grad(): for y in ys:",
"cfg.LOG.SAVE_ITER: ep_str = f'ep_{ep:07}' save_samples(cgn, u_fixed, sample_path, ep_str) torch.save(cgn.state_dict(), join(weights_path,",
"args = parser.parse_args() cfg = get_cfg_defaults() cfg = merge_args_and_cfg(args, cfg)",
"normalize=True) path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png') torchvision.utils.save_image(mask,",
"= datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if cfg.WEIGHTS_PATH: weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])",
"Logging if cfg.LOG.LOSSES: msg = ''.join([f\"[{k}: {v:.3f}]\" for k, v",
"default=4000, help='pseudo_batch_size = batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000, help='Save samples/weights",
"type=int, default=4000, help='pseudo_batch_size = batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000, help='Save",
"(i+1) % cfg.TRAIN.BATCH_ACC == 0: opts.step(['shape', 'bg', 'texture']) # Saving",
"f'{y}_1_premask_' + ep_str + '.png') torchvision.utils.save_image(premask, path, normalize=True) path =",
"save_sample_single(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev = u_fixed.to(cgn.get_device()) ys =",
"- mask) * background # Losses losses_g = {} losses_g['l1']",
"L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc,",
"cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec() torch.save(u_fixed, u_fixed_path) else: u_fixed = torch.load(u_fixed_path)",
"init cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION, pretrained=True, ) print(\"------CGN-------\") print(cgn)",
"import make_grid import repackage repackage.up() from imagenet.models import CGN from",
"from torch import nn, optim from torch.autograd import Variable import",
"torch.cat([mask, grid], 2) # save to disk to_save.append(grid) del to_plot,",
"to disk to_save.append(grid) del to_plot, mask, premask, foreground, background, x_gen,",
"cgn, opts, losses): inception_score_val = list() # total number of",
"total number of episodes, accounted for batch accumulation episodes =",
"every n iter') parser.add_argument('--log_losses', default=False, action='store_true', help='Print out losses') args",
"* foreground + (1 - mask) * background # build",
"and train device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cgn",
"= PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK) L_text =",
"del to_plot, mask, premask, foreground, background, x_gen, x_gt # save",
"if cfg.LOG.SAVE_SINGLES else save_sample_sheet pbar = tqdm(range(*ep_range)) for i, ep",
"_ = cgn(inp) x_gen = mask * foreground + (1",
"with batch_acc') parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size = batch_acc*batch size') parser.add_argument('--save_iter',",
"path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png') torchvision.utils.save_image(premask, path,",
"foreground, background, bg_mask = cgn(inp) x_gen = mask * foreground",
") print(\"------CGN-------\") print(cgn) if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights =",
"cgn.to(device) losses = (l.to(device) for l in losses) fit(cfg, cgn,",
"cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) _, mask, premask,",
"in ys: # generate y_vec = cgn.get_class_vec(y, sz=1) inp =",
"path, normalize=True) path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')",
"weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path = weights_path.replace('weights', 'samples')",
"y_vec.to(dev), cgn.truncation) x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)",
"model init cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION, pretrained=True, ) print(\"------CGN-------\")",
"want a different noise vector than provided in the repo')",
"cfg.TRAIN.BATCH_ACC = args.batch_acc cfg.MODEL.TRUNCATION = args.truncation return cfg if __name__",
"episodes, accounted for batch accumulation episodes = cfg.TRAIN.EPISODES episodes *=",
"if cfg.WEIGHTS_PATH: weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path =",
"= join(model_path, 'weights') sample_path = join(model_path, 'samples') pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True) pathlib.Path(sample_path).mkdir(parents=True,",
"l in losses) fit(cfg, cgn, opts, losses) def merge_args_and_cfg(args, cfg):",
"def main(cfg): # model init cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION,",
"- mask) * background # build class grid to_plot =",
"args.save_iter cfg.LOG.LOSSES = args.log_losses cfg.LOG.INCEPTION_SCORE = True cfg.TRAIN.EPISODES = args.episodes",
"torchvision.utils.save_image(background, path, normalize=True) path = join(sample_path, f'{y}_5_gen_ims_' + ep_str +",
"# losses L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary",
"images instead of sheets') parser.add_argument('--truncation', type=float, default=1.0, help='Truncation value for",
"from utils import Optimizers from inception_score import * def save_sample_sheet(cgn,",
"= {k.replace('module.', ''): v for k, v in weights.items()} cgn.load_state_dict(weights)",
"i % cfg.LOG.SAVE_ITER: ep_str = f'ep_{ep:07}' save_samples(cgn, u_fixed, sample_path, ep_str)",
"= join(sample_path, f'{y}_4_bgs_' + ep_str + '.png') torchvision.utils.save_image(background, path, normalize=True)",
"y_vec = cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) x_gt,",
"# model init cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION, pretrained=True, )",
"cgn.truncation) _, mask, premask, foreground, background, _ = cgn(inp) x_gen",
"'samples') pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True) pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True) ep_range = (0, episodes) #",
"True cfg.TRAIN.EPISODES = args.episodes cfg.TRAIN.BATCH_SZ = args.batch_sz cfg.TRAIN.BATCH_ACC = args.batch_acc",
"loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)",
"L_text, L_bg = losses save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else",
"to_save = [] with torch.no_grad(): for y in ys: #",
"torch import nn, optim from torch.autograd import Variable import torchvision",
"= L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask, foreground) losses_g['bg'] = L_bg(background_mask)",
"x_gt # save the image path = join(sample_path, f'cls_sheet_' +",
"L_mask, L_text, L_bg) # push to device and train device",
"size') parser.add_argument('--save_iter', type=int, default=4000, help='Save samples/weights every n iter') parser.add_argument('--log_losses',",
"f'{y}_5_gen_ims_' + ep_str + '.png') torchvision.utils.save_image(x_gen, path, normalize=True) cgn.train() def",
"fit(cfg, cgn, opts, losses) def merge_args_and_cfg(args, cfg): cfg.MODEL_NAME = args.model_name",
"path, normalize=True) cgn.train() def fit(cfg, cgn, opts, losses): inception_score_val =",
"cfg.MODEL.TRUNCATION = args.truncation return cfg if __name__ == \"__main__\": parser",
"L_binary(mask) losses_g['mask'] = L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask, foreground) losses_g['bg']",
"if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec() torch.save(u_fixed, u_fixed_path)",
"= int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path = weights_path.replace('weights', 'samples') ep_range = (start_ep, start_ep",
"path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png') torchvision.utils.save_image(foreground, path,",
"losses) fit(cfg, cgn, opts, losses) def merge_args_and_cfg(args, cfg): cfg.MODEL_NAME =",
"cgn.f_bg, cfg.LR.BG) # losses L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc =",
"import tqdm import argparse import torch from torch import nn,",
"ep_str) torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth')) # Logging if cfg.LOG.LOSSES:",
"330, 382, 385, 483, 559, 751, 938, 947, 999] with",
"cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION, pretrained=True, ) print(\"------CGN-------\") print(cgn) if",
"foreground, background, _ = cgn(inp) x_gen = mask * foreground",
"directories for experiments time_str = datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if cfg.WEIGHTS_PATH: weights_path =",
"action='store_true', help='If you want a different noise vector than provided",
"cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) x_gt, mask, premask,",
"sheets') parser.add_argument('--truncation', type=float, default=1.0, help='Truncation value for noise sampling') parser.add_argument('--episodes',",
"2) # save to disk to_save.append(grid) del to_plot, mask, premask,",
"(1 - mask) * background # Losses losses_g = {}",
"provided in the repo') parser.add_argument('--save_singles', default=False, action='store_true', help='Save single images",
"pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True) ep_range = (0, episodes) # fixed noise sample",
"default='', help='provide path to continue training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If",
"fixed noise sample u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt') if not",
"= (0, episodes) # fixed noise sample u_fixed_path = join('imagenet',",
"normalize=True) path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png') torchvision.utils.save_image(background,",
"SCore if cfg.LOG.INCEPTION_SCORE: score, score_std = inception_score(x_gen) inception_score_val.append(score) def main(cfg):",
"import torchvision from torchvision.transforms import Pad from torchvision.utils import make_grid",
"imagenet.models import CGN from imagenet.config import get_cfg_defaults from shared.losses import",
"= argparse.ArgumentParser() parser.add_argument('--model_name', default='tmp', help='Weights and samples will be saved",
"do dataloading, hence, one episode = one gradient update.\") parser.add_argument('--batch_sz',",
"samples will be saved under experiments/model_name') parser.add_argument('--weights_path', default='', help='provide path",
"losses save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet pbar =",
"def save_sample_single(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev = u_fixed.to(cgn.get_device()) ys",
"help='If you want a different noise vector than provided in",
"in conjunciton with batch_acc') parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size = batch_acc*batch",
"help='Save single images instead of sheets') parser.add_argument('--truncation', type=float, default=1.0, help='Truncation",
"+ (1 - mask) * background # Losses losses_g =",
"= args.save_iter cfg.LOG.LOSSES = args.log_losses cfg.LOG.INCEPTION_SCORE = True cfg.TRAIN.EPISODES =",
"than provided in the repo') parser.add_argument('--save_singles', default=False, action='store_true', help='Save single",
"start_ep + episodes) else: model_path = join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path",
"= batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000, help='Save samples/weights every n",
"+ '.png') torchvision.utils.save_image(mask, path, normalize=True) path = join(sample_path, f'{y}_3_texture_' +",
"foreground) losses_g['bg'] = L_bg(background_mask) # backprop losses_g = {k: v.mean()",
"default='tmp', help='Weights and samples will be saved under experiments/model_name') parser.add_argument('--weights_path',",
"f'{y}_3_texture_' + ep_str + '.png') torchvision.utils.save_image(foreground, path, normalize=True) path =",
"Losses losses_g = {} losses_g['l1'] = L_l1(x_gen, x_gt) losses_g['perc'] =",
"L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK) L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses",
"size, use in conjunciton with batch_acc') parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size",
"__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='tmp', help='Weights and",
"{v:.3f}]\" for k, v in losses_g.items()]) pbar.set_description(msg) # Calculate Inception",
"mask) * background # save_images path = join(sample_path, f'{y}_1_premask_' +",
"episodes *= cfg.TRAIN.BATCH_ACC # directories for experiments time_str = datetime.now().strftime(\"%Y_%m_%d_%H_%M\")",
"from torchvision.transforms import Pad from torchvision.utils import make_grid import repackage",
"382, 385, 483, 559, 751, 938, 947, 999] with torch.no_grad():",
"backprop losses_g = {k: v.mean() for k, v in losses_g.items()}",
"help='provide path to continue training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If you",
"for noise sampling') parser.add_argument('--episodes', type=int, default=300, help=\"We don't do dataloading,",
"= list() # total number of episodes, accounted for batch",
"background # build class grid to_plot = [premask, foreground, background,",
"+ ep_str + '.png') torchvision.utils.save_image(mask, path, normalize=True) path = join(sample_path,",
"# Calculate Inception SCore if cfg.LOG.INCEPTION_SCORE: score, score_std = inception_score(x_gen)",
"normalize=True) cgn.train() def fit(cfg, cgn, opts, losses): inception_score_val = list()",
"weights = {k.replace('module.', ''): v for k, v in weights.items()}",
"v in weights.items()} cgn.load_state_dict(weights) # optimizers opts = Optimizers() opts.set('shape',",
"u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt') if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:",
"premask, foreground, background, x_gen, x_gt # save the image path",
"you want a different noise vector than provided in the",
"conjunciton with batch_acc') parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size = batch_acc*batch size')",
"nn, optim from torch.autograd import Variable import torchvision from torchvision.transforms",
"join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path = join(model_path, 'weights') sample_path = join(model_path,",
"opts.set('shape', cgn.f_shape, cfg.LR.SHAPE) opts.set('texture', cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg, cfg.LR.BG) #",
"optimizers opts = Optimizers() opts.set('shape', cgn.f_shape, cfg.LR.SHAPE) opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)",
"(1 - mask) * background # build class grid to_plot",
"foreground, background, x_gen, x_gt # save the image path =",
"L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses save_samples =",
"L_mask, L_text, L_bg = losses save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES",
"Optimizers() opts.set('shape', cgn.f_shape, cfg.LR.SHAPE) opts.set('texture', cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg, cfg.LR.BG)",
"cfg.LR.SHAPE) opts.set('texture', cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg, cfg.LR.BG) # losses L_l1",
"path, normalize=True) path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')",
"else 'cpu') cgn = cgn.to(device) losses = (l.to(device) for l",
"* foreground + (1 - mask) * background # save_images",
"foreground + (1 - mask) * background # save_images path",
"grid = torch.cat([mask, grid], 2) # save to disk to_save.append(grid)",
"* foreground + (1 - mask) * background # Losses",
"u_fixed = cgn.get_noise_vec() torch.save(u_fixed, u_fixed_path) else: u_fixed = torch.load(u_fixed_path) #",
"= args.batch_sz cfg.TRAIN.BATCH_ACC = args.batch_acc cfg.MODEL.TRUNCATION = args.truncation return cfg",
"else: u_fixed = torch.load(u_fixed_path) # Training Loop cgn.train() L_l1, L_perc,",
"sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) _, mask, premask, foreground,",
"torchvision.utils.save_image(mask, path, normalize=True) path = join(sample_path, f'{y}_3_texture_' + ep_str +",
"if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.', ''): v",
"f'{y}_2_mask_' + ep_str + '.png') torchvision.utils.save_image(mask, path, normalize=True) path =",
"ep_str + '.png') torchvision.utils.save_image(x_gen, path, normalize=True) cgn.train() def fit(cfg, cgn,",
"mask, premask, foreground, background, background_mask = cgn() x_gen = mask",
"msg = ''.join([f\"[{k}: {v:.3f}]\" for k, v in losses_g.items()]) pbar.set_description(msg)",
"losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg) # push",
"argparse import torch from torch import nn, optim from torch.autograd",
"L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc, L_binary, L_mask, L_text,",
"noise vector than provided in the repo') parser.add_argument('--save_singles', default=False, action='store_true',",
"repackage repackage.up() from imagenet.models import CGN from imagenet.config import get_cfg_defaults",
"import repackage repackage.up() from imagenet.models import CGN from imagenet.config import",
"# optimizers opts = Optimizers() opts.set('shape', cgn.f_shape, cfg.LR.SHAPE) opts.set('texture', cgn.f_text,",
"cgn = cgn.to(device) losses = (l.to(device) for l in losses)",
"for k, v in weights.items()} cgn.load_state_dict(weights) # optimizers opts =",
"u_fixed.to(cgn.get_device()) ys = [15, 251, 330, 382, 385, 483, 559,",
"print(cgn) if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.', ''):",
"cfg.LOG.SAVE_ITER = args.save_iter cfg.LOG.LOSSES = args.log_losses cfg.LOG.INCEPTION_SCORE = True cfg.TRAIN.EPISODES",
"datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if cfg.WEIGHTS_PATH: weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path",
"cfg.LR.BG) # losses L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)",
"* def save_sample_sheet(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev = u_fixed.to(cgn.get_device())",
"parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size = batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000,",
"# backprop losses_g = {k: v.mean() for k, v in",
"mask, foreground) losses_g['bg'] = L_bg(background_mask) # backprop losses_g = {k:",
"u_fixed, sample_path, ep_str) torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth')) # Logging",
"build class grid to_plot = [premask, foreground, background, x_gen, x_gt]",
"sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) x_gt, mask, premask, foreground,",
"to_plot = [premask, foreground, background, x_gen, x_gt] grid = make_grid(torch.cat(to_plot).detach().cpu(),",
"cfg.TRAIN.EPISODES episodes *= cfg.TRAIN.BATCH_ACC # directories for experiments time_str =",
"BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg) #",
"parser.add_argument('--weights_path', default='', help='provide path to continue training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',",
"import get_cfg_defaults from shared.losses import * from utils import Optimizers",
"add unnormalized mask mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu() grid =",
"if (i+1) % cfg.TRAIN.BATCH_ACC == 0: opts.step(['shape', 'bg', 'texture']) #",
"use in conjunciton with batch_acc') parser.add_argument('--batch_acc', type=int, default=4000, help='pseudo_batch_size =",
"L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK) L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg",
"cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg, cfg.LR.BG) # losses L_l1 = ReconstructionLoss(mode='l1',",
"cfg.LOG.SAVE_SINGLES else save_sample_sheet pbar = tqdm(range(*ep_range)) for i, ep in",
"[premask, foreground, background, x_gen, x_gt] grid = make_grid(torch.cat(to_plot).detach().cpu(), nrow=len(to_plot), padding=2,",
"mask, premask, foreground, background, _ = cgn(inp) x_gen = mask",
"# total number of episodes, accounted for batch accumulation episodes",
"L_text(x_gt, mask, foreground) losses_g['bg'] = L_bg(background_mask) # backprop losses_g =",
"losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward() if (i+1) % cfg.TRAIN.BATCH_ACC ==",
"grid], 2) # save to disk to_save.append(grid) del to_plot, mask,",
"+ ep_str + '.png') torchvision.utils.save_image(torch.cat(to_save, 1), path) cgn.train() def save_sample_single(cgn,",
"args.log_losses cfg.LOG.INCEPTION_SCORE = True cfg.TRAIN.EPISODES = args.episodes cfg.TRAIN.BATCH_SZ = args.batch_sz",
"= BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK) L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg =",
"= True cfg.TRAIN.EPISODES = args.episodes cfg.TRAIN.BATCH_SZ = args.batch_sz cfg.TRAIN.BATCH_ACC =",
"normalize=True) # add unnormalized mask mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()",
"mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu() grid = torch.cat([mask, grid], 2)",
"join(weights_path, ep_str + '.pth')) # Logging if cfg.LOG.LOSSES: msg =",
"normalize=True) path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png') torchvision.utils.save_image(foreground,",
"# Losses losses_g = {} losses_g['l1'] = L_l1(x_gen, x_gt) losses_g['perc']",
"background # save_images path = join(sample_path, f'{y}_1_premask_' + ep_str +",
"cfg.LOG.SAVE_SINGLES = args.save_singles cfg.LOG.SAVE_ITER = args.save_iter cfg.LOG.LOSSES = args.log_losses cfg.LOG.INCEPTION_SCORE",
"print(\"------CGN-------\") print(cgn) if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.',",
"join('imagenet', 'experiments', 'u_fixed.pt') if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed =",
"\"__main__\": parser = argparse.ArgumentParser() parser.add_argument('--model_name', default='tmp', help='Weights and samples will",
"pbar.set_description(msg) # Calculate Inception SCore if cfg.LOG.INCEPTION_SCORE: score, score_std =",
"'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path = join(model_path, 'weights') sample_path = join(model_path, 'samples')",
"torchvision.transforms import Pad from torchvision.utils import make_grid import repackage repackage.up()",
"L_bg(background_mask) # backprop losses_g = {k: v.mean() for k, v",
"to device and train device = torch.device('cuda' if torch.cuda.is_available() else",
"dataloading, hence, one episode = one gradient update.\") parser.add_argument('--batch_sz', type=int,",
"+ '.png') torchvision.utils.save_image(torch.cat(to_save, 1), path) cgn.train() def save_sample_single(cgn, u_fixed, sample_path,",
"= join(sample_path, f'cls_sheet_' + ep_str + '.png') torchvision.utils.save_image(torch.cat(to_save, 1), path)",
"[] with torch.no_grad(): for y in ys: # generate y_vec",
"+ '.png') torchvision.utils.save_image(background, path, normalize=True) path = join(sample_path, f'{y}_5_gen_ims_' +",
"+ (1 - mask) * background # save_images path =",
"y_vec.to(dev), cgn.truncation) _, mask, premask, foreground, background, _ = cgn(inp)",
"parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If you want a different noise vector",
"751, 938, 947, 999] to_save = [] with torch.no_grad(): for",
"import os from datetime import datetime from os.path import join",
"if torch.cuda.is_available() else 'cpu') cgn = cgn.to(device) losses = (l.to(device)",
"losses) def merge_args_and_cfg(args, cfg): cfg.MODEL_NAME = args.model_name cfg.WEIGHTS_PATH = args.weights_path",
"shared.losses import * from utils import Optimizers from inception_score import",
"torchvision.utils.save_image(foreground, path, normalize=True) path = join(sample_path, f'{y}_4_bgs_' + ep_str +",
"f'ep_{ep:07}' save_samples(cgn, u_fixed, sample_path, ep_str) torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))",
"= Optimizers() opts.set('shape', cgn.f_shape, cfg.LR.SHAPE) opts.set('texture', cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg,",
"ep_str + '.png') torchvision.utils.save_image(foreground, path, normalize=True) path = join(sample_path, f'{y}_4_bgs_'",
"= L_l1(x_gen, x_gt) losses_g['perc'] = L_perc(x_gen, x_gt) losses_g['binary'] = L_binary(mask)",
"L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask, foreground) losses_g['bg'] = L_bg(background_mask) #",
"ys: # generate y_vec = cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev),",
"x_gen, x_gt] grid = make_grid(torch.cat(to_plot).detach().cpu(), nrow=len(to_plot), padding=2, normalize=True) # add",
"+ (1 - mask) * background # build class grid",
"L_binary, L_mask, L_text, L_bg) # push to device and train",
"vector than provided in the repo') parser.add_argument('--save_singles', default=False, action='store_true', help='Save",
"background, background_mask = cgn() x_gen = mask * foreground +",
"Optimizers from inception_score import * def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):",
"1, 1)).detach().cpu() grid = torch.cat([mask, grid], 2) # save to",
"saved under experiments/model_name') parser.add_argument('--weights_path', default='', help='provide path to continue training')",
"x_gt) losses_g['binary'] = L_binary(mask) losses_g['mask'] = L_mask(mask) losses_g['perc_text'] = L_text(x_gt,",
"type=int, default=1, help='Batch size, use in conjunciton with batch_acc') parser.add_argument('--batch_acc',",
"losses_g['mask'] = L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask, foreground) losses_g['bg'] =",
"for l in losses) fit(cfg, cgn, opts, losses) def merge_args_and_cfg(args,",
"'texture']) # Saving if not i % cfg.LOG.SAVE_ITER: ep_str =",
"episodes = cfg.TRAIN.EPISODES episodes *= cfg.TRAIN.BATCH_ACC # directories for experiments",
"save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet pbar = tqdm(range(*ep_range)) for i,",
"from torchvision.utils import make_grid import repackage repackage.up() from imagenet.models import",
"path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png') torchvision.utils.save_image(x_gen, path,",
"+ ep_str + '.png') torchvision.utils.save_image(background, path, normalize=True) path = join(sample_path,",
"batch_acc*batch size') parser.add_argument('--save_iter', type=int, default=4000, help='Save samples/weights every n iter')",
"losses_g = {k: v.mean() for k, v in losses_g.items()} g_loss",
"Pad from torchvision.utils import make_grid import repackage repackage.up() from imagenet.models",
"ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask =",
"enumerate(pbar): x_gt, mask, premask, foreground, background, background_mask = cgn() x_gen",
"+ episodes) else: model_path = join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path =",
"+ ep_str + '.png') torchvision.utils.save_image(x_gen, path, normalize=True) cgn.train() def fit(cfg,",
"cgn.train() L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses save_samples",
"947, 999] to_save = [] with torch.no_grad(): for y in",
"= (l.to(device) for l in losses) fit(cfg, cgn, opts, losses)",
"1), path) cgn.train() def save_sample_single(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev",
"= [15, 251, 330, 382, 385, 483, 559, 751, 938,",
"(1 - mask) * background # save_images path = join(sample_path,",
"help='Truncation value for noise sampling') parser.add_argument('--episodes', type=int, default=300, help=\"We don't",
"exist_ok=True) ep_range = (0, episodes) # fixed noise sample u_fixed_path",
"'.png') torchvision.utils.save_image(mask, path, normalize=True) path = join(sample_path, f'{y}_3_texture_' + ep_str",
"for batch accumulation episodes = cfg.TRAIN.EPISODES episodes *= cfg.TRAIN.BATCH_ACC #",
"# Logging if cfg.LOG.LOSSES: msg = ''.join([f\"[{k}: {v:.3f}]\" for k,",
"= u_fixed.to(cgn.get_device()) ys = [15, 251, 330, 382, 385, 483,",
"imagenet.config import get_cfg_defaults from shared.losses import * from utils import",
"# fixed noise sample u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt') if",
"else: model_path = join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path = join(model_path, 'weights')",
"torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth')) # Logging if cfg.LOG.LOSSES: msg",
"episodes) # fixed noise sample u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')",
"L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)",
"= sum(losses_g.values()) g_loss.backward() if (i+1) % cfg.TRAIN.BATCH_ACC == 0: opts.step(['shape',",
"mask) * background # Losses losses_g = {} losses_g['l1'] =",
"v in losses_g.items()]) pbar.set_description(msg) # Calculate Inception SCore if cfg.LOG.INCEPTION_SCORE:",
"with torch.no_grad(): for y in ys: # generate y_vec =",
"losses_g['perc_text'] = L_text(x_gt, mask, foreground) losses_g['bg'] = L_bg(background_mask) # backprop",
"x_gt, mask, premask, foreground, background, background_mask = cgn() x_gen =",
"different noise vector than provided in the repo') parser.add_argument('--save_singles', default=False,",
"don't do dataloading, hence, one episode = one gradient update.\")",
"normalize=True) path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png') torchvision.utils.save_image(x_gen,",
"image path = join(sample_path, f'cls_sheet_' + ep_str + '.png') torchvision.utils.save_image(torch.cat(to_save,",
"for k, v in losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward() if",
"parser.add_argument('--episodes', type=int, default=300, help=\"We don't do dataloading, hence, one episode",
"Variable import torchvision from torchvision.transforms import Pad from torchvision.utils import",
"751, 938, 947, 999] with torch.no_grad(): for y in ys:",
"k, v in losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward() if (i+1)",
"score, score_std = inception_score(x_gen) inception_score_val.append(score) def main(cfg): # model init",
"= ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask",
"number of episodes, accounted for batch accumulation episodes = cfg.TRAIN.EPISODES",
"- mask) * background # save_images path = join(sample_path, f'{y}_1_premask_'",
"for i, ep in enumerate(pbar): x_gt, mask, premask, foreground, background,",
"in losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward() if (i+1) % cfg.TRAIN.BATCH_ACC",
"938, 947, 999] with torch.no_grad(): for y in ys: #",
"f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path = join(model_path, 'weights') sample_path = join(model_path, 'samples') pathlib.Path(weights_path).mkdir(parents=True,",
"losses_g['binary'] = L_binary(mask) losses_g['mask'] = L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask,",
"(l.to(device) for l in losses) fit(cfg, cgn, opts, losses) def",
"fit(cfg, cgn, opts, losses): inception_score_val = list() # total number",
"ep_range = (start_ep, start_ep + episodes) else: model_path = join('imagenet',",
"= join(sample_path, f'{y}_2_mask_' + ep_str + '.png') torchvision.utils.save_image(mask, path, normalize=True)",
"weights = torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.', ''): v for k,",
"args.save_singles cfg.LOG.SAVE_ITER = args.save_iter cfg.LOG.LOSSES = args.log_losses cfg.LOG.INCEPTION_SCORE = True",
"optim from torch.autograd import Variable import torchvision from torchvision.transforms import",
"losses_g['perc'] = L_perc(x_gen, x_gt) losses_g['binary'] = L_binary(mask) losses_g['mask'] = L_mask(mask)",
"= (start_ep, start_ep + episodes) else: model_path = join('imagenet', 'experiments',",
"argparse.ArgumentParser() parser.add_argument('--model_name', default='tmp', help='Weights and samples will be saved under",
"_, mask, premask, foreground, background, _ = cgn(inp) x_gen =",
"import torch from torch import nn, optim from torch.autograd import",
"accumulation episodes = cfg.TRAIN.EPISODES episodes *= cfg.TRAIN.BATCH_ACC # directories for",
"losses L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1) L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC) L_binary =",
"import * def save_sample_sheet(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev =",
"# add unnormalized mask mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu() grid",
"to continue training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If you want a",
"hence, one episode = one gradient update.\") parser.add_argument('--batch_sz', type=int, default=1,",
"action='store_true', help='Save single images instead of sheets') parser.add_argument('--truncation', type=float, default=1.0,",
"ep in enumerate(pbar): x_gt, mask, premask, foreground, background, background_mask =",
"* background # Losses losses_g = {} losses_g['l1'] = L_l1(x_gen,",
"ep_str): cgn.eval() dev = u_fixed.to(cgn.get_device()) ys = [15, 251, 330,",
"= PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc, L_binary,",
"get_cfg_defaults from shared.losses import * from utils import Optimizers from",
"# generate y_vec = cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev),",
"to_plot, mask, premask, foreground, background, x_gen, x_gt # save the",
"Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu() grid = torch.cat([mask, grid], 2) # save",
"cgn, opts, losses) def merge_args_and_cfg(args, cfg): cfg.MODEL_NAME = args.model_name cfg.WEIGHTS_PATH",
"a different noise vector than provided in the repo') parser.add_argument('--save_singles',",
"'experiments', 'u_fixed.pt') if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec()",
"ep_str + '.png') torchvision.utils.save_image(background, path, normalize=True) path = join(sample_path, f'{y}_5_gen_ims_'",
"os from datetime import datetime from os.path import join import",
"'bg', 'texture']) # Saving if not i % cfg.LOG.SAVE_ITER: ep_str",
"opts.set('texture', cgn.f_text, cfg.LR.TEXTURE) opts.set('bg', cgn.f_bg, cfg.LR.BG) # losses L_l1 =",
"from inception_score import * def save_sample_sheet(cgn, u_fixed, sample_path, ep_str): cgn.eval()",
"tqdm(range(*ep_range)) for i, ep in enumerate(pbar): x_gt, mask, premask, foreground,",
"main(cfg): # model init cgn = CGN( batch_sz=cfg.TRAIN.BATCH_SZ, truncation=cfg.MODEL.TRUNCATION, pretrained=True,",
"torch.load(u_fixed_path) # Training Loop cgn.train() L_l1, L_perc, L_binary, L_mask, L_text,",
"+ '.pth')) # Logging if cfg.LOG.LOSSES: msg = ''.join([f\"[{k}: {v:.3f}]\"",
"iter') parser.add_argument('--log_losses', default=False, action='store_true', help='Print out losses') args = parser.parse_args()",
"start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path = weights_path.replace('weights', 'samples') ep_range = (start_ep,",
"path = join(sample_path, f'cls_sheet_' + ep_str + '.png') torchvision.utils.save_image(torch.cat(to_save, 1),",
"path to continue training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If you want",
"value for noise sampling') parser.add_argument('--episodes', type=int, default=300, help=\"We don't do",
"default=False, action='store_true', help='Print out losses') args = parser.parse_args() cfg =",
"import nn, optim from torch.autograd import Variable import torchvision from",
"inception_score_val = list() # total number of episodes, accounted for",
"os.path import join import pathlib from tqdm import tqdm import",
"args.batch_acc cfg.MODEL.TRUNCATION = args.truncation return cfg if __name__ == \"__main__\":",
"= join(model_path, 'samples') pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True) pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True) ep_range = (0,",
"inception_score import * def save_sample_sheet(cgn, u_fixed, sample_path, ep_str): cgn.eval() dev",
"pretrained=True, ) print(\"------CGN-------\") print(cgn) if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights",
"x_gt] grid = make_grid(torch.cat(to_plot).detach().cpu(), nrow=len(to_plot), padding=2, normalize=True) # add unnormalized",
"else save_sample_sheet pbar = tqdm(range(*ep_range)) for i, ep in enumerate(pbar):",
"cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH) weights = {k.replace('module.', ''): v for",
"from tqdm import tqdm import argparse import torch from torch",
"noise sampling') parser.add_argument('--episodes', type=int, default=300, help=\"We don't do dataloading, hence,",
"Training Loop cgn.train() L_l1, L_perc, L_binary, L_mask, L_text, L_bg =",
"(L_l1, L_perc, L_binary, L_mask, L_text, L_bg) # push to device",
"one episode = one gradient update.\") parser.add_argument('--batch_sz', type=int, default=1, help='Batch",
"= join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png') torchvision.utils.save_image(x_gen, path, normalize=True)",
"cgn() x_gen = mask * foreground + (1 - mask)",
"v in losses_g.items()} g_loss = sum(losses_g.values()) g_loss.backward() if (i+1) %",
"make_grid import repackage repackage.up() from imagenet.models import CGN from imagenet.config",
"BinaryLoss(loss_weight=cfg.LAMBDA.BINARY) L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK) L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT) L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)",
"args.episodes cfg.TRAIN.BATCH_SZ = args.batch_sz cfg.TRAIN.BATCH_ACC = args.batch_acc cfg.MODEL.TRUNCATION = args.truncation",
"= args.batch_acc cfg.MODEL.TRUNCATION = args.truncation return cfg if __name__ ==",
"ep_str + '.png') torchvision.utils.save_image(premask, path, normalize=True) path = join(sample_path, f'{y}_2_mask_'",
"losses_g.items()]) pbar.set_description(msg) # Calculate Inception SCore if cfg.LOG.INCEPTION_SCORE: score, score_std",
"nrow=len(to_plot), padding=2, normalize=True) # add unnormalized mask mask = Pad(2)(mask[0].repeat(3,",
"parser.add_argument('--model_name', default='tmp', help='Weights and samples will be saved under experiments/model_name')",
"background, bg_mask = cgn(inp) x_gen = mask * foreground +",
"v for k, v in weights.items()} cgn.load_state_dict(weights) # optimizers opts",
"u_fixed, sample_path, ep_str): cgn.eval() dev = u_fixed.to(cgn.get_device()) ys = [15,",
"opts, losses) def merge_args_and_cfg(args, cfg): cfg.MODEL_NAME = args.model_name cfg.WEIGHTS_PATH =",
"parser.add_argument('--save_singles', default=False, action='store_true', help='Save single images instead of sheets') parser.add_argument('--truncation',",
"= [] with torch.no_grad(): for y in ys: # generate",
"training') parser.add_argument('--sampled_fixed_noise', default=False, action='store_true', help='If you want a different noise",
"+ '.png') torchvision.utils.save_image(x_gen, path, normalize=True) cgn.train() def fit(cfg, cgn, opts,",
"import Optimizers from inception_score import * def save_sample_sheet(cgn, u_fixed, sample_path,",
"default=1, help='Batch size, use in conjunciton with batch_acc') parser.add_argument('--batch_acc', type=int,",
"= L_binary(mask) losses_g['mask'] = L_mask(mask) losses_g['perc_text'] = L_text(x_gt, mask, foreground)",
"*= cfg.TRAIN.BATCH_ACC # directories for experiments time_str = datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if",
"u_fixed_path) else: u_fixed = torch.load(u_fixed_path) # Training Loop cgn.train() L_l1,",
"save to disk to_save.append(grid) del to_plot, mask, premask, foreground, background,",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') cgn = cgn.to(device)",
"str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:]) sample_path = weights_path.replace('weights', 'samples') ep_range =",
"torch.autograd import Variable import torchvision from torchvision.transforms import Pad from",
"= tqdm(range(*ep_range)) for i, ep in enumerate(pbar): x_gt, mask, premask,",
"= join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}') weights_path = join(model_path, 'weights') sample_path =",
"* background # build class grid to_plot = [premask, foreground,",
"type=int, default=300, help=\"We don't do dataloading, hence, one episode =",
"Inception SCore if cfg.LOG.INCEPTION_SCORE: score, score_std = inception_score(x_gen) inception_score_val.append(score) def",
"ep_str + '.png') torchvision.utils.save_image(mask, path, normalize=True) path = join(sample_path, f'{y}_3_texture_'",
"= cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation) x_gt, mask,",
"or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec() torch.save(u_fixed, u_fixed_path) else: u_fixed =",
"= {k: v.mean() for k, v in losses_g.items()} g_loss =",
"[15, 251, 330, 382, 385, 483, 559, 751, 938, 947,",
"repackage.up() from imagenet.models import CGN from imagenet.config import get_cfg_defaults from",
"args.weights_path cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise cfg.LOG.SAVE_SINGLES = args.save_singles cfg.LOG.SAVE_ITER = args.save_iter",
"be saved under experiments/model_name') parser.add_argument('--weights_path', default='', help='provide path to continue",
"truncation=cfg.MODEL.TRUNCATION, pretrained=True, ) print(\"------CGN-------\") print(cgn) if cfg.WEIGHTS_PATH: weights = torch.load(cfg.WEIGHTS_PATH)",
"= args.weights_path cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise cfg.LOG.SAVE_SINGLES = args.save_singles cfg.LOG.SAVE_ITER =",
"u_fixed = torch.load(u_fixed_path) # Training Loop cgn.train() L_l1, L_perc, L_binary,",
"== 0: opts.step(['shape', 'bg', 'texture']) # Saving if not i",
"= save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet pbar = tqdm(range(*ep_range)) for",
"'.pth')) # Logging if cfg.LOG.LOSSES: msg = ''.join([f\"[{k}: {v:.3f}]\" for",
"episode = one gradient update.\") parser.add_argument('--batch_sz', type=int, default=1, help='Batch size,",
"Saving if not i % cfg.LOG.SAVE_ITER: ep_str = f'ep_{ep:07}' save_samples(cgn,",
"# save_images path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')",
"tqdm import argparse import torch from torch import nn, optim",
"= BackgroundLoss(loss_weight=cfg.LAMBDA.BG) losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)",
"+ '.png') torchvision.utils.save_image(foreground, path, normalize=True) path = join(sample_path, f'{y}_4_bgs_' +",
"instead of sheets') parser.add_argument('--truncation', type=float, default=1.0, help='Truncation value for noise",
"generate y_vec = cgn.get_class_vec(y, sz=1) inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)",
"# save to disk to_save.append(grid) del to_plot, mask, premask, foreground,",
"'weights') sample_path = join(model_path, 'samples') pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True) pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True) ep_range",
"(u_fixed.to(dev), y_vec.to(dev), cgn.truncation) _, mask, premask, foreground, background, _ =",
"L_bg) # push to device and train device = torch.device('cuda'",
"utils import Optimizers from inception_score import * def save_sample_sheet(cgn, u_fixed,",
"background_mask = cgn() x_gen = mask * foreground + (1",
"x_gen, x_gt # save the image path = join(sample_path, f'cls_sheet_'",
"in enumerate(pbar): x_gt, mask, premask, foreground, background, background_mask = cgn()",
"experiments time_str = datetime.now().strftime(\"%Y_%m_%d_%H_%M\") if cfg.WEIGHTS_PATH: weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent) start_ep",
"# Training Loop cgn.train() L_l1, L_perc, L_binary, L_mask, L_text, L_bg",
"foreground + (1 - mask) * background # build class",
"(start_ep, start_ep + episodes) else: model_path = join('imagenet', 'experiments', f'cgn_{time_str}_{cfg.MODEL_NAME}')",
"dev = u_fixed.to(cgn.get_device()) ys = [15, 251, 330, 382, 385,",
"os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE: u_fixed = cgn.get_noise_vec() torch.save(u_fixed, u_fixed_path) else: u_fixed",
"sample u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt') if not os.path.isfile(u_fixed_path) or",
"default=1.0, help='Truncation value for noise sampling') parser.add_argument('--episodes', type=int, default=300, help=\"We",
"ep_range = (0, episodes) # fixed noise sample u_fixed_path =",
"args.truncation return cfg if __name__ == \"__main__\": parser = argparse.ArgumentParser()",
"L_text, L_bg) # push to device and train device =",
"repo') parser.add_argument('--save_singles', default=False, action='store_true', help='Save single images instead of sheets')",
"class grid to_plot = [premask, foreground, background, x_gen, x_gt] grid"
] |
[
"from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [ path('', views.index, name='index'),",
"path, include,re_path from . import views from rest_framework.authtoken.views import obtain_auth_token",
"from . import views from rest_framework.authtoken.views import obtain_auth_token urlpatterns =",
"views.about, name='about'), path('projects', views.projects, name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()),",
"django.urls import path, include,re_path from . import views from rest_framework.authtoken.views",
"obtain_auth_token urlpatterns = [ path('', views.index, name='index'), path('about', views.about, name='about'),",
"path('', views.index, name='index'), path('about', views.about, name='about'), path('projects', views.projects, name='projects'), path('photos',",
"import path, include,re_path from . import views from rest_framework.authtoken.views import",
"= [ path('', views.index, name='index'), path('about', views.about, name='about'), path('projects', views.projects,",
"views.projects, name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()), re_path(r'^api-token-auth/', obtain_auth_token), re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$',",
". import views from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [",
"<reponame>ramza007/Ramza.io<gh_stars>1-10 from django.conf.urls import url from django.urls import path, include,re_path",
"django.conf.urls import url from django.urls import path, include,re_path from .",
"name='index'), path('about', views.about, name='about'), path('projects', views.projects, name='projects'), path('photos', views.photos, name='photos'),",
"[ path('', views.index, name='index'), path('about', views.about, name='about'), path('projects', views.projects, name='projects'),",
"name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()), re_path(r'^api-token-auth/', obtain_auth_token), re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$', views.ProjectDescription.as_view()),",
"urlpatterns = [ path('', views.index, name='index'), path('about', views.about, name='about'), path('projects',",
"rest_framework.authtoken.views import obtain_auth_token urlpatterns = [ path('', views.index, name='index'), path('about',",
"from django.conf.urls import url from django.urls import path, include,re_path from",
"name='about'), path('projects', views.projects, name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()), re_path(r'^api-token-auth/',",
"import views from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [ path('',",
"from django.urls import path, include,re_path from . import views from",
"path('projects', views.projects, name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()), re_path(r'^api-token-auth/', obtain_auth_token),",
"import url from django.urls import path, include,re_path from . import",
"path('about', views.about, name='about'), path('projects', views.projects, name='projects'), path('photos', views.photos, name='photos'), re_path(r'^api/projects/$',",
"views from rest_framework.authtoken.views import obtain_auth_token urlpatterns = [ path('', views.index,",
"views.index, name='index'), path('about', views.about, name='about'), path('projects', views.projects, name='projects'), path('photos', views.photos,",
"path('photos', views.photos, name='photos'), re_path(r'^api/projects/$', views.ProjectList.as_view()), re_path(r'^api-token-auth/', obtain_auth_token), re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$', views.ProjectDescription.as_view()), ]",
"import obtain_auth_token urlpatterns = [ path('', views.index, name='index'), path('about', views.about,",
"include,re_path from . import views from rest_framework.authtoken.views import obtain_auth_token urlpatterns",
"url from django.urls import path, include,re_path from . import views"
] |
[
"system\"\"\" def __init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri",
"def __init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri else",
"import urllib from kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided",
"path = urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin = True",
"= urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin = True super(PluginFileStore,",
"if store_uri else None self.is_plugin = True super(PluginFileStore, self).__init__(path, artifact_uri)",
"from kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints",
"import FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints system\"\"\" def",
"provided through entrypoints system\"\"\" def __init__(self, store_uri=None, artifact_uri=None): path =",
"six.moves import urllib from kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore): \"\"\"FileStore",
"urllib from kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided through",
"class PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints system\"\"\" def __init__(self, store_uri=None,",
"PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints system\"\"\" def __init__(self, store_uri=None, artifact_uri=None):",
"store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin",
"kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints system\"\"\"",
"<reponame>iPieter/kiwi from six.moves import urllib from kiwi.store.tracking.file_store import FileStore class",
"entrypoints system\"\"\" def __init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if",
"artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin =",
"\"\"\"FileStore provided through entrypoints system\"\"\" def __init__(self, store_uri=None, artifact_uri=None): path",
"through entrypoints system\"\"\" def __init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path",
"urllib.parse.urlparse(store_uri).path if store_uri else None self.is_plugin = True super(PluginFileStore, self).__init__(path,",
"from six.moves import urllib from kiwi.store.tracking.file_store import FileStore class PluginFileStore(FileStore):",
"FileStore class PluginFileStore(FileStore): \"\"\"FileStore provided through entrypoints system\"\"\" def __init__(self,",
"__init__(self, store_uri=None, artifact_uri=None): path = urllib.parse.urlparse(store_uri).path if store_uri else None"
] |
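The docstring advertises this store as provided through the entrypoints system, i.e. discovered via setuptools entry points rather than imported directly (kiwi appears to mirror MLflow's store layout, which uses this pattern for tracking-store plugins). A minimal registration sketch follows; the entry-point group name kiwi.tracking_store and the file-plugin URI scheme are assumptions, not confirmed by this excerpt.

# Hypothetical setup.py registering PluginFileStore as a tracking-store plugin.
# The group name "kiwi.tracking_store" and the scheme "file-plugin" are assumed.
from setuptools import setup

setup(
    name='kiwi-plugin-file-store',
    version='0.1.0',
    py_modules=['plugin_file_store'],
    install_requires=['kiwi', 'six'],
    entry_points={
        # URI scheme -> "module:Class" implementing the store
        'kiwi.tracking_store': [
            'file-plugin = plugin_file_store:PluginFileStore',
        ],
    },
)

With such a registration, a store URI like file-plugin:///some/dir would resolve to PluginFileStore, whose __init__ strips the scheme and hands the bare path to FileStore.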
# --- terracotta: Flask server API tests ---
from io import BytesIO
import json
import urllib.parse
from collections import OrderedDict

from PIL import Image
import numpy as np
import pytest


@pytest.fixture(scope='module')
def flask_app():
    from terracotta.server import create_app
    return create_app()


@pytest.fixture(scope='module')
def client(flask_app):
    with flask_app.test_client() as client:
        yield client


def test_get_keys(client, use_testdb):
    rv = client.get('/keys')

    expected_response = [
        {'key': 'key1'},
        {'key': 'akey'},
        {'key': 'key2', 'description': 'key2'}
    ]
    assert rv.status_code == 200
    assert expected_response == json.loads(rv.data)['keys']


def test_get_metadata(client, use_testdb):
    rv = client.get('/metadata/val11/x/val12/')
    assert rv.status_code == 200
    assert ['extra_data'] == json.loads(rv.data)['metadata']


def test_get_metadata_nonexisting(client, use_testdb):
    rv = client.get('/metadata/val11/x/NONEXISTING/')
    assert rv.status_code == 404


def test_get_datasets(client, use_testdb):
    rv = client.get('/datasets')
    assert rv.status_code == 200

    datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets']
    assert len(datasets) == 4
    assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets


def test_get_datasets_pagination(client, use_testdb):
    # no page (implicit 0)
    rv = client.get('/datasets?limit=2')
    assert rv.status_code == 200

    response = json.loads(rv.data, object_pairs_hook=OrderedDict)
    assert response['limit'] == 2
    assert response['page'] == 0

    first_datasets = response['datasets']
    assert len(first_datasets) == 2
    assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets

    # second page
    rv = client.get('/datasets?limit=2&page=1')
    assert rv.status_code == 200

    response = json.loads(rv.data, object_pairs_hook=OrderedDict)
    assert response['limit'] == 2
    assert response['page'] == 1

    last_datasets = response['datasets']
    assert len(last_datasets) == 2
    assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets

    # page out of range
    rv = client.get('/datasets?limit=2&page=1000')
    assert rv.status_code == 200
    assert not json.loads(rv.data)['datasets']

    # invalid page
    rv = client.get('/datasets?page=-1')
    assert rv.status_code == 400

    # invalid limit
    rv = client.get('/datasets?limit=-1')
    assert rv.status_code == 400


def test_get_datasets_selective(client, use_testdb):
    rv = client.get('/datasets?key1=val21')
    assert rv.status_code == 200
    assert len(json.loads(rv.data)['datasets']) == 3

    rv = client.get('/datasets?key1=val21&key2=val23')
    assert rv.status_code == 200
    assert len(json.loads(rv.data)['datasets']) == 1


def test_get_datasets_unknown_key(client, use_testdb):
    rv = client.get('/datasets?UNKNOWN=val21')
    assert rv.status_code == 400


def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz):
    import terracotta
    settings = terracotta.get_settings()

    x, y, z = raster_file_xyz
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
    assert rv.status_code == 200

    img = Image.open(BytesIO(rv.data))
    assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE


def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz):
    import terracotta
    settings = terracotta.get_settings()

    x, y, z = raster_file_xyz
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz')
    assert rv.status_code == 200

    img = Image.open(BytesIO(rv.data))
    assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE


def test_get_singleband_cmap(client, use_testdb, raster_file_xyz):
    import terracotta
    settings = terracotta.get_settings()

    x, y, z = raster_file_xyz
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet')
    assert rv.status_code == 200

    img = Image.open(BytesIO(rv.data))
    assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE


def test_get_singleband_preview(client, use_testdb):
    import terracotta
    settings = terracotta.get_settings()

    rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet')
    assert rv.status_code == 200

    img = Image.open(BytesIO(rv.data))
    assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE


def urlsafe_json(payload):
    payload_json = json.dumps(payload)
    return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:"')


def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz):
    import terracotta
    settings = terracotta.get_settings()

    x, y, z = raster_file_xyz
    explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'}

    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
                    f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 200, rv.data.decode('utf-8')

    img = Image.open(BytesIO(rv.data))
    assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE


def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz):
    x, y, z = raster_file_xyz
    explicit_cmap = {1: (0, 0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'}

    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
                    f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 400

    explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'}
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?'
                    f'explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 400

    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet'
                    f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 400

    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit')
    assert rv.status_code == 400

    explicit_cmap[3] = 'omgomg'
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
                    f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 400

    explicit_cmap = [(255, 255, 255)]
    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
                    f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
    assert rv.status_code == 400

    rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
                    f'&explicit_color_map=foo')
    assert rv.status_code == 400
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json = json.dumps(payload) return urllib.parse.quote_plus(payload_json,",
"400 def test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"settings = terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 explicit_cmap = [(255, 255,",
"y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code ==",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"400 def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code == 200",
"BytesIO import json import urllib.parse from collections import OrderedDict from",
"def test_get_datasets_unknown_key(client, use_testdb): rv = client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb):",
"(*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client, use_testdb, raster_file_xyz): import terracotta settings =",
"(value) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert rv.status_code",
"== 100 def test_get_spec(client): from terracotta import __version__ rv =",
"= raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2.0: (255,",
"response = json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page']",
"def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"yield client def test_get_keys(client, use_testdb): rv = client.get('/keys') expected_response =",
"rv.status_code == 400 explicit_cmap = [(255, 255, 255)] rv =",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img)",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client, use_testdb, raster_file_xyz): import terracotta settings",
"test_get_datasets(client, use_testdb): rv = client.get('/datasets') assert rv.status_code == 200 datasets",
"4 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets",
"rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) ==",
"'?expression=v1*v2' '&stretch_range=[0,10000)' ) assert rv.status_code == 400 # invalid stretch",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert rv.status_code ==",
"response['page'] == 1 last_datasets = response['datasets'] assert len(last_datasets) == 2",
"explicit_cmap[3] = 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code ==",
"3) def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"no expression rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code",
"assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb): import terracotta",
"rv.status_code == 400 def test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21') assert",
"terracotta settings = terracotta.get_settings() x, y, z = raster_file_xyz rv",
"y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code ==",
"rv.status_code == 400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta settings",
"400 # no stretch range rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE # custom",
"== 200 assert len(json.loads(rv.data)['colormap']) == 100 def test_get_spec(client): from terracotta",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings",
"'?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 # invalid expression",
"400 # invalid stretch range (value) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png'",
"Image import numpy as np import pytest @pytest.fixture(scope='module') def flask_app():",
"client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200 assert not json.loads(rv.data)['datasets'] # invalid",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code == 200",
"= json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page'] ==",
"test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400 def test_get_colormap_extra_args(client):",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code",
"rv = client.get('/datasets') assert rv.status_code == 200 datasets = json.loads(rv.data,",
"# page out of range rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code",
"= raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code == 200 img",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code == 400 def test_get_rgb(client, use_testdb, raster_file_xyz):",
"# invalid stretch range (value) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"== 400 # invalid stretch range (syntax) rv = client.get(",
"== 100 def test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code ==",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0) def",
"__version__ rv = client.get('/swagger.json') assert rv.status_code == 200 assert json.loads(rv.data)",
"('akey', 'x'), ('key2', 'val12')]) not in last_datasets # page out",
"2 assert response['page'] == 0 first_datasets = response['datasets'] assert len(first_datasets)",
"# invalid stretch range (syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"0) rv = client.get('/datasets?limit=2') assert rv.status_code == 200 response =",
"rv.status_code == 400 # missing operand rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png'",
"== 2 assert response['page'] == 1 last_datasets = response['datasets'] assert",
"400 def test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code == 200",
"z = raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code == 200",
"terracotta settings = terracotta.get_settings() rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]'",
"use_testdb): rv = client.get('/metadata/val11/x/val12/') assert rv.status_code == 200 assert ['extra_data']",
"== 400 explicit_cmap[3] = 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb,",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz):",
"z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code == 200",
"test_get_datasets_pagination(client, use_testdb): # no page (implicit 0) rv = client.get('/datasets?limit=2')",
"= [ {'key': 'key1'}, {'key': 'akey'}, {'key': 'key2', 'description': 'key2'}",
"urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz): import terracotta settings =",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta",
"'key2'} ] assert rv.status_code == 200 assert expected_response == json.loads(rv.data)['keys']",
"== 200 assert len(json.loads(rv.data)['colormap']) == 100 def test_get_colormap_invalid(client): rv =",
"def test_get_compute_invalid(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz #",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' ) assert rv.status_code == 400 # no expression",
"test_get_compute_invalid(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz # too",
"rv = client.get('/datasets?limit=2&page=1') assert rv.status_code == 200 response = json.loads(rv.data,",
"== 200 assert expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv",
"rv.data.decode('utf-8') rv = client.get('/apidoc') assert rv.status_code == 200 assert b'Terracotta'",
"= client.get('/datasets?limit=-1') assert rv.status_code == 400 def test_get_datasets_selective(client, use_testdb): rv",
"assert rv.status_code == 400 def test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta",
"rv.status_code == 400 def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code",
"rv.status_code == 400 def test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code",
"len(json.loads(rv.data)['colormap']) == 100 def test_get_spec(client): from terracotta import __version__ rv",
"rv.status_code == 200, rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape ==",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x, y,",
"{'key': 'key2', 'description': 'key2'} ] assert rv.status_code == 200 assert",
"terracotta.get_settings() rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code",
"== 400 # invalid stretch range (value) rv = client.get(",
"{1: (0, 0, 0), 2.0: (255, 255, 255, 20), 3:",
"rv.status_code == 400 # invalid limit rv = client.get('/datasets?limit=-1') assert",
"== 200 assert ['extra_data'] == json.loads(rv.data)['metadata'] def test_get_metadata_nonexisting(client, use_testdb): rv",
"'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets # second page",
"3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code",
"400 # invalid limit rv = client.get('/datasets?limit=-1') assert rv.status_code ==",
"400 explicit_cmap = [(255, 255, 255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}')",
"y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code ==",
"400 # no expression rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' )",
"as client: yield client def test_get_keys(client, use_testdb): rv = client.get('/keys')",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client, use_testdb,",
"range (value) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert",
"255), 3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert",
"400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code == 400 explicit_cmap[3] =",
"= 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client, use_testdb, raster_file_xyz):",
"== 400 def test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21') assert rv.status_code",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"assert rv.status_code == 200, rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"10) rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200 img =",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128, 128) def test_get_compute_preview(client, use_testdb):",
"= raster_file_xyz for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]',",
"rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"(*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta settings =",
"client.get('/datasets?key1=val21&key2=val23') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) == 1 def",
"'?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code == 200 img =",
"assert expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/')",
"= json.dumps(payload) return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz): import",
"== 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert",
"0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'}",
"settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz",
"3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta",
"def test_get_datasets_pagination(client, use_testdb): # no page (implicit 0) rv =",
"use_testdb): import terracotta settings = terracotta.get_settings() x, y, z =",
"client.get('/keys') expected_response = [ {'key': 'key1'}, {'key': 'akey'}, {'key': 'key2',",
"client def test_get_keys(client, use_testdb): rv = client.get('/keys') expected_response = [",
"def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz rv",
"0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4:",
"= client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz):",
"assert rv.status_code == 400 # no stretch range rv =",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code == 400 def test_get_rgb(client, use_testdb,",
"y, z = raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code ==",
"z = raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' )",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"0) def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz",
"invalid page rv = client.get('/datasets?page=-1') assert rv.status_code == 400 #",
"raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200 img =",
"4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code ==",
"def test_get_metadata_nonexisting(client, use_testdb): rv = client.get('/metadata/val11/x/NONEXISTING/') assert rv.status_code == 404",
"200 assert json.loads(rv.data) assert __version__ in rv.data.decode('utf-8') rv = client.get('/apidoc')",
"= [(255, 255, 255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo')",
"terracotta.get_settings() x, y, z = raster_file_xyz for stretch_range in ('[0,10000]',",
"test_get_metadata_nonexisting(client, use_testdb): rv = client.get('/metadata/val11/x/NONEXISTING/') assert rv.status_code == 404 def",
"raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code == 400 def test_get_rgb(client,",
"== (128, 128) def test_get_compute_preview(client, use_testdb): import terracotta settings =",
"y, z = raster_file_xyz explicit_cmap = {1: (0, 0, 0),",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' ) assert rv.status_code == 400 #",
"= raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200 img",
"assert rv.status_code == 200 assert expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client,",
"rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) == 100 def test_get_spec(client): from",
"assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets",
"datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets) == 4 assert OrderedDict([('key1',",
"terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert",
"client.get('/metadata/val11/x/val12/') assert rv.status_code == 200 assert ['extra_data'] == json.loads(rv.data)['metadata'] def",
"collections import OrderedDict from PIL import Image import numpy as",
"terracotta settings = terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code ==",
"of range rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200 assert",
"= client.get('/apidoc') assert rv.status_code == 200 assert b'Terracotta' in rv.data",
"use_testdb): rv = client.get('/keys') expected_response = [ {'key': 'key1'}, {'key':",
"'&stretch_range=[10000,0]' ) assert rv.status_code == 400 def test_get_colormap(client): rv =",
"255, 20), 3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}')",
"client: yield client def test_get_keys(client, use_testdb): rv = client.get('/keys') expected_response",
"def client(flask_app): with flask_app.test_client() as client: yield client def test_get_keys(client,",
"rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400 def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz')",
"10000]', '[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code ==",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_preview(client, use_testdb): import terracotta",
"'[0,null]', '[null, 10000]', '[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' ) assert rv.status_code == 400 #",
"('key2', 'val12')]) not in last_datasets # page out of range",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code == 400 #",
"== settings.DEFAULT_TILE_SIZE # custom tile size rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png'",
"assert len(json.loads(rv.data)['colormap']) == 100 def test_get_spec(client): from terracotta import __version__",
"def test_get_keys(client, use_testdb): rv = client.get('/keys') expected_response = [ {'key':",
"0 first_datasets = response['datasets'] assert len(first_datasets) == 2 assert OrderedDict([('key1',",
"assert rv.status_code == 400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import terracotta",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client, use_testdb,",
"tile size rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' )",
"def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code == 200 assert",
"x, y, z = raster_file_xyz for stretch_range in ('[0,1]', '[0,null]',",
"in first_datasets # second page rv = client.get('/datasets?limit=2&page=1') assert rv.status_code",
"= raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2: (255,",
"use_testdb): rv = client.get('/datasets?key1=val21') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets'])",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' ) assert rv.status_code == 400",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)' ) assert rv.status_code == 400",
"4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 200,",
"= response['datasets'] assert len(last_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey',",
"(0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4:",
"create_app() @pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client() as client: yield client",
"'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 200, rv.data.decode('utf-8')",
"raster_file_xyz for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'):",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json",
"assert __version__ in rv.data.decode('utf-8') rv = client.get('/apidoc') assert rv.status_code ==",
"test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/') assert rv.status_code == 200 assert",
"rv.status_code == 200 assert expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb):",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) ==",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' ) assert rv.status_code ==",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128, 128) def test_get_compute_preview(client, use_testdb): import",
"128) def test_get_compute_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv",
"rv = client.get('/swagger.json') assert rv.status_code == 200 assert json.loads(rv.data) assert",
"assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets def",
"'[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200 img",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' ) assert rv.status_code == 400 # no",
"= {1: (0, 0, 0), 2: (255, 255, 255), 3:",
"400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo') assert rv.status_code == 400 def",
"terracotta import __version__ rv = client.get('/swagger.json') assert rv.status_code == 200",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"def test_get_singleband_cmap(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"# invalid limit rv = client.get('/datasets?limit=-1') assert rv.status_code == 400",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json =",
"raster_file_xyz): x, y, z = raster_file_xyz explicit_cmap = {1: (0,",
"range rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200 assert not",
"== 400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import terracotta settings =",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb, raster_file_xyz):",
"terracotta settings = terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code ==",
"import pytest @pytest.fixture(scope='module') def flask_app(): from terracotta.server import create_app return",
"import create_app return create_app() @pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client() as",
"200 assert expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv =",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'): rv",
"z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' ) assert rv.status_code == 400 # invalid",
"in rv.data.decode('utf-8') rv = client.get('/apidoc') assert rv.status_code == 200 assert",
"f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 # no",
"too few keys rv = client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' )",
"np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x, y, z",
"('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}')",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_preview(client,",
"1]', '[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200",
"assert rv.status_code == 400 explicit_cmap = [(255, 255, 255)] rv",
"# custom tile size rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]'",
"test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"z = raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2:",
"def test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/') assert rv.status_code == 200",
"('akey', 'x'), ('key2', 'val12')]) in datasets def test_get_datasets_pagination(client, use_testdb): #",
"f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert",
"y, z = raster_file_xyz for stretch_range in ('[0,1]', '[0,null]', '[null,",
"def test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21') assert rv.status_code == 200",
"raster_file_xyz): x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert",
"raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz') assert rv.status_code == 200 img =",
"('key2', 'val12')]) in first_datasets # second page rv = client.get('/datasets?limit=2&page=1')",
"assert np.asarray(img).shape == (128, 128) def test_get_compute_preview(client, use_testdb): import terracotta",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import",
"== 2 assert response['page'] == 0 first_datasets = response['datasets'] assert",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo') assert rv.status_code == 400 def test_get_singleband_stretch(client, use_testdb,",
"'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200 img =",
"test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap'])",
"= json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets) == 4 assert OrderedDict([('key1', 'val11'),",
"= client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400 def test_get_colormap_extra_args(client): rv =",
"# invalid page rv = client.get('/datasets?page=-1') assert rv.status_code == 400",
"in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&'",
"settings.DEFAULT_TILE_SIZE # custom tile size rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"raster_file_xyz # too few keys rv = client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"size x, y, z = raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png'",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client,",
"settings = terracotta.get_settings() x, y, z = (0, 0, 10)",
"settings = terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200",
"test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"len(json.loads(rv.data)['colormap']) == 100 def test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code",
"= client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) == 100",
"f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code",
"= client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"200 assert not json.loads(rv.data)['datasets'] # invalid page rv = client.get('/datasets?page=-1')",
"'?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 # no stretch",
"= terracotta.get_settings() x, y, z = (0, 0, 10) rv",
"client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) == 100 def",
"'?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)' ) assert rv.status_code == 400 # invalid stretch",
"= client.get('/metadata/val11/x/val12/') assert rv.status_code == 200 assert ['extra_data'] == json.loads(rv.data)['metadata']",
"'x'), ('key2', 'val12')]) in datasets def test_get_datasets_pagination(client, use_testdb): # no",
"== 200, rv.data img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE,",
"client.get('/datasets?limit=-1') assert rv.status_code == 400 def test_get_datasets_selective(client, use_testdb): rv =",
"numpy as np import pytest @pytest.fixture(scope='module') def flask_app(): from terracotta.server",
"default tile size x, y, z = raster_file_xyz rv =",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code == 400 # missing operand",
"__version__ in rv.data.decode('utf-8') rv = client.get('/apidoc') assert rv.status_code == 200",
"client.get('/datasets?page=-1') assert rv.status_code == 400 # invalid limit rv =",
"== (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta settings",
"= terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client,",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json = json.dumps(payload) return",
"2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'} rv =",
"== (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings",
"2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb,",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb, raster_file_xyz): x,",
"200, rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def",
"settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x,",
"client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"'key2', 'description': 'key2'} ] assert rv.status_code == 200 assert expected_response",
"urlsafe_json(payload): payload_json = json.dumps(payload) return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb,",
"client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 #",
"x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code",
"(*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings =",
"OrderedDict from PIL import Image import numpy as np import",
"last_datasets # page out of range rv = client.get('/datasets?limit=2&page=1000') assert",
"x, y, z = raster_file_xyz explicit_cmap = {1: (0, 0,",
"use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() # default tile",
"= terracotta.get_settings() x, y, z = raster_file_xyz explicit_cmap = {1:",
"200, rv.data img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)",
"use_testdb): rv = client.get('/datasets') assert rv.status_code == 200 datasets =",
"raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200 img =",
"assert np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x, y,",
"def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"missing operand rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' ) assert",
"def test_get_compute(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() #",
"rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE,",
"rv = client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code ==",
"# second page rv = client.get('/datasets?limit=2&page=1') assert rv.status_code == 200",
"rv.status_code == 200 assert ['extra_data'] == json.loads(rv.data)['metadata'] def test_get_metadata_nonexisting(client, use_testdb):",
"rv.status_code == 400 def test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta settings",
"'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code == 200, rv.data",
"rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE",
"f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code == 200, rv.data img = Image.open(BytesIO(rv.data)) assert",
"# too few keys rv = client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]'",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client,",
"test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap'])",
"in last_datasets # page out of range rv = client.get('/datasets?limit=2&page=1000')",
"== 400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta settings =",
") assert rv.status_code == 400 # no expression rv =",
"'akey'}, {'key': 'key2', 'description': 'key2'} ] assert rv.status_code == 200",
"client.get('/metadata/val11/x/NONEXISTING/') assert rv.status_code == 404 def test_get_datasets(client, use_testdb): rv =",
"# missing operand rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2' '&stretch_range=[0,10000)' )",
"stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'): rv =",
"first_datasets = response['datasets'] assert len(first_datasets) == 2 assert OrderedDict([('key1', 'val11'),",
"not json.loads(rv.data)['datasets'] # invalid page rv = client.get('/datasets?page=-1') assert rv.status_code",
"json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page'] == 0",
"response['datasets'] assert len(last_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'),",
"invalid limit rv = client.get('/datasets?limit=-1') assert rv.status_code == 400 def",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE # custom tile",
"out of range rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200",
"assert rv.status_code == 400 # invalid stretch range (syntax) rv",
"rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) ==",
"rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code ==",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json = json.dumps(payload)",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz):",
"== settings.DEFAULT_TILE_SIZE def test_get_singleband_preview(client, use_testdb): import terracotta settings = terracotta.get_settings()",
"stretch range (syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)' )",
"rv.status_code == 400 # invalid stretch range (value) rv =",
"'key1'}, {'key': 'akey'}, {'key': 'key2', 'description': 'key2'} ] assert rv.status_code",
"= client.get('/datasets?key1=val21') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) == 3",
"import json import urllib.parse from collections import OrderedDict from PIL",
"rv = client.get('/datasets?page=-1') assert rv.status_code == 400 # invalid limit",
"z = raster_file_xyz for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]',",
"json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets) == 4 assert OrderedDict([('key1', 'val11'), ('akey',",
"test_get_keys(client, use_testdb): rv = client.get('/keys') expected_response = [ {'key': 'key1'},",
"= {1: (0, 0, 0), 2.0: (255, 255, 255, 20),",
"create_app return create_app() @pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client() as client:",
"= client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"use_testdb): import terracotta settings = terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert",
"<filename>tests/server/test_flask_api.py from io import BytesIO import json import urllib.parse from",
"return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz): import terracotta settings",
") assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"urllib.parse from collections import OrderedDict from PIL import Image import",
"settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv",
"'#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code ==",
"== 200 assert not json.loads(rv.data)['datasets'] # invalid page rv =",
"def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings = terracotta.get_settings() x, y,",
"test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz rv =",
"# no expression rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert",
"x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code",
"assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"settings = terracotta.get_settings() x, y, z = raster_file_xyz for stretch_range",
"import __version__ rv = client.get('/swagger.json') assert rv.status_code == 200 assert",
") assert rv.status_code == 400 # no stretch range rv",
"last_datasets = response['datasets'] assert len(last_datasets) == 2 assert OrderedDict([('key1', 'val11'),",
"use_testdb, raster_file_xyz): x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN')",
"PIL import Image import numpy as np import pytest @pytest.fixture(scope='module')",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0)",
"(255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'} rv =",
"assert rv.status_code == 400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta",
"[(255, 255, 255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code ==",
"use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y, z",
"terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert",
"'?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert rv.status_code == 400 def test_get_colormap(client): rv",
"terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert",
"invalid stretch range (value) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]'",
"== 400 def test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta settings =",
"z = raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz') assert rv.status_code == 200",
"'&stretch_range=[0,10000)' ) assert rv.status_code == 400 # invalid stretch range",
"400 # invalid stretch range (syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png'",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code == 400 explicit_cmap[3] = 'omgomg' rv =",
"terracotta settings = terracotta.get_settings() x, y, z = raster_file_xyz for",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128, 128) def test_get_compute_preview(client,",
"= terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz')",
"rv.status_code == 200, rv.data img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape ==",
"{'key': 'akey'}, {'key': 'key2', 'description': 'key2'} ] assert rv.status_code ==",
"['extra_data'] == json.loads(rv.data)['metadata'] def test_get_metadata_nonexisting(client, use_testdb): rv = client.get('/metadata/val11/x/NONEXISTING/') assert",
"no page (implicit 0) rv = client.get('/datasets?limit=2') assert rv.status_code ==",
"== 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code == 400 explicit_cmap[3]",
"rv.status_code == 400 # invalid expression rv = client.get( f'/compute/val21/x/preview.png'",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128, 128) def",
"x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code",
"response['limit'] == 2 assert response['page'] == 0 first_datasets = response['datasets']",
"assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) == 100 def test_get_colormap_invalid(client):",
"== 200, rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE",
"assert rv.status_code == 200 assert json.loads(rv.data) assert __version__ in rv.data.decode('utf-8')",
"terracotta.get_settings() x, y, z = raster_file_xyz for stretch_range in ('[0,1]',",
"terracotta settings = terracotta.get_settings() x, y, z = raster_file_xyz explicit_cmap",
"== settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json = json.dumps(payload) return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"')",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb,",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'",
"np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb): import terracotta settings",
"'&stretch_range=[0,10000]' ) assert rv.status_code == 400 # no stretch range",
"== 200 assert len(json.loads(rv.data)['datasets']) == 3 rv = client.get('/datasets?key1=val21&key2=val23') assert",
"== settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client, use_testdb, raster_file_xyz): import terracotta settings =",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_preview(client, use_testdb):",
"client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 200 img",
"rv = client.get('/datasets?key1=val21&key2=val23') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) ==",
"assert rv.status_code == 200 response = json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit']",
"settings = terracotta.get_settings() x, y, z = raster_file_xyz explicit_cmap =",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client,",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' ) assert rv.status_code == 400",
"raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img =",
"test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21') assert rv.status_code == 200 assert",
"y, z = raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]'",
"def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x,",
"255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv",
"3 rv = client.get('/datasets?key1=val21&key2=val23') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets'])",
"in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}')",
"== 400 def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert rv.status_code ==",
"400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"def test_get_datasets(client, use_testdb): rv = client.get('/datasets') assert rv.status_code == 200",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv =",
"= client.get('/datasets') assert rv.status_code == 200 datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets']",
"terracotta.get_settings() x, y, z = raster_file_xyz explicit_cmap = {1: (0,",
"def test_get_rgb_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv =",
"x, y, z = (0, 0, 10) rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')",
"== 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400",
"'&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"'?stretch_range=[0,10000)' ) assert rv.status_code == 400 # missing operand rv",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb):",
"len(first_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')])",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import",
"def test_get_compute_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv =",
"page rv = client.get('/datasets?limit=2&page=1') assert rv.status_code == 200 response =",
"no stretch range rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' ) assert",
"{1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff',",
"= client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code == 400",
"= terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet')",
"(128, 128) def test_get_compute_preview(client, use_testdb): import terracotta settings = terracotta.get_settings()",
"json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page'] == 1",
"rv.status_code == 200 response = json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] ==",
"io import BytesIO import json import urllib.parse from collections import",
"100 def test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400",
"'[null, 10000]', '[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code ==",
"= terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200 img",
"raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2.0: (255, 255,",
"settings.DEFAULT_TILE_SIZE def urlsafe_json(payload): payload_json = json.dumps(payload) return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv =",
"== 4 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in",
"== 404 def test_get_datasets(client, use_testdb): rv = client.get('/datasets') assert rv.status_code",
"def test_get_singleband_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv =",
"'x'), ('key2', 'val12')]) not in last_datasets # page out of",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def urlsafe_json(payload):",
"== 3 rv = client.get('/datasets?key1=val21&key2=val23') assert rv.status_code == 200 assert",
"z = raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2.0:",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"(255, 255, 255), 3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?'",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv =",
"= client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200 assert not json.loads(rv.data)['datasets'] #",
"def test_get_rgb(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"3) def test_get_rgb_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv",
"'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv",
"== 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128, 128)",
"rv.status_code == 400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import terracotta settings",
"rv.status_code == 400 # no stretch range rv = client.get(",
"use_testdb, raster_file_xyz): x, y, z = raster_file_xyz # too few",
"== 1 last_datasets = response['datasets'] assert len(last_datasets) == 2 assert",
"explicit_cmap = [(255, 255, 255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert",
"'[null,null]', 'null'): rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code == 200,",
"expression rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code ==",
"'[0,null]', '[null, 1]', '[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code",
"test_get_compute(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() # default",
"assert rv.status_code == 400 def test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert",
"OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets def test_get_datasets_pagination(client,",
"assert len(last_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2',",
"settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"use_testdb, raster_file_xyz): x, y, z = raster_file_xyz explicit_cmap = {1:",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings =",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE # custom tile size rv = client.get(",
"rv = client.get('/datasets?limit=-1') assert rv.status_code == 400 def test_get_datasets_selective(client, use_testdb):",
"rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code ==",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 explicit_cmap = [(255,",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert rv.status_code == 400 def test_get_colormap(client):",
"rv = client.get('/metadata/val11/x/val12/') assert rv.status_code == 200 assert ['extra_data'] ==",
"custom tile size rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]'",
"assert rv.status_code == 400 def test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21')",
"test_get_spec(client): from terracotta import __version__ rv = client.get('/swagger.json') assert rv.status_code",
"assert ['extra_data'] == json.loads(rv.data)['metadata'] def test_get_metadata_nonexisting(client, use_testdb): rv = client.get('/metadata/val11/x/NONEXISTING/')",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"client.get('/datasets') assert rv.status_code == 200 datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert",
"client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 #",
"= client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code == 400",
"tile size x, y, z = raster_file_xyz rv = client.get(",
"== 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE #",
"raster_file_xyz): import terracotta settings = terracotta.get_settings() # default tile size",
"rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (128,",
"assert len(datasets) == 4 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2',",
"assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code ==",
"settings.DEFAULT_TILE_SIZE def test_get_singleband_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv",
"f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 explicit_cmap = [(255, 255, 255)]",
"rv = client.get('/datasets?limit=2') assert rv.status_code == 200 response = json.loads(rv.data,",
"response['datasets'] assert len(first_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'),",
"400 def test_get_datasets_selective(client, use_testdb): rv = client.get('/datasets?key1=val21') assert rv.status_code ==",
"== settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz):",
"client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"== 200 datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets) == 4",
"with flask_app.test_client() as client: yield client def test_get_keys(client, use_testdb): rv",
"np import pytest @pytest.fixture(scope='module') def flask_app(): from terracotta.server import create_app",
"client(flask_app): with flask_app.test_client() as client: yield client def test_get_keys(client, use_testdb):",
") assert rv.status_code == 400 def test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100')",
"json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/') assert rv.status_code ==",
"('key2', 'val12')]) in datasets def test_get_datasets_pagination(client, use_testdb): # no page",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"settings = terracotta.get_settings() rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' )",
"assert len(first_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2',",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb, raster_file_xyz): x, y, z",
"== settings.DEFAULT_TILE_SIZE def test_get_compute_invalid(client, use_testdb, raster_file_xyz): x, y, z =",
"] assert rv.status_code == 200 assert expected_response == json.loads(rv.data)['keys'] def",
"assert rv.status_code == 200, rv.data img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"z = raster_file_xyz for stretch_range in ('[0,1]', '[0,null]', '[null, 1]',",
"255, 255, 20), 3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'",
"settings = terracotta.get_settings() x, y, z = raster_file_xyz rv =",
"from terracotta import __version__ rv = client.get('/swagger.json') assert rv.status_code ==",
"assert rv.status_code == 200 assert not json.loads(rv.data)['datasets'] # invalid page",
"x, y, z = raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23'",
"use_testdb): # no page (implicit 0) rv = client.get('/datasets?limit=2') assert",
"assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo') assert rv.status_code",
"raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code",
"= terracotta.get_settings() # default tile size x, y, z =",
"assert rv.status_code == 400 # invalid expression rv = client.get(",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def",
"= terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz')",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 200, rv.data.decode('utf-8') img =",
"@pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client() as client: yield client def",
"import terracotta settings = terracotta.get_settings() # default tile size x,",
"test_get_datasets_unknown_key(client, use_testdb): rv = client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400 def",
"import numpy as np import pytest @pytest.fixture(scope='module') def flask_app(): from",
"rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb,",
"flask_app(): from terracotta.server import create_app return create_app() @pytest.fixture(scope='module') def client(flask_app):",
"== json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/') assert rv.status_code",
"test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"= response['datasets'] assert len(first_datasets) == 2 assert OrderedDict([('key1', 'val11'), ('akey',",
"assert response['page'] == 0 first_datasets = response['datasets'] assert len(first_datasets) ==",
"not in last_datasets # page out of range rv =",
"raster_file_xyz for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'):",
"for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'): rv",
"terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code == 200 img =",
"payload_json = json.dumps(payload) return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz):",
"rv.status_code == 400 # invalid stretch range (syntax) rv =",
"range (syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)' ) assert",
"np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import terracotta",
"'val12')]) in datasets def test_get_datasets_pagination(client, use_testdb): # no page (implicit",
"client.get('/swagger.json') assert rv.status_code == 200 assert json.loads(rv.data) assert __version__ in",
"return create_app() @pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client() as client: yield",
"json.loads(rv.data) assert __version__ in rv.data.decode('utf-8') rv = client.get('/apidoc') assert rv.status_code",
"= terracotta.get_settings() x, y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')",
"= raster_file_xyz rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 200, rv.data.decode('utf-8') img",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}')",
"use_testdb): rv = client.get('/metadata/val11/x/NONEXISTING/') assert rv.status_code == 404 def test_get_datasets(client,",
"assert len(json.loads(rv.data)['datasets']) == 3 rv = client.get('/datasets?key1=val21&key2=val23') assert rv.status_code ==",
"(*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb): import terracotta settings = terracotta.get_settings()",
"explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255),",
"= client.get('/datasets?page=-1') assert rv.status_code == 400 # invalid limit rv",
"[ {'key': 'key1'}, {'key': 'akey'}, {'key': 'key2', 'description': 'key2'} ]",
"import terracotta settings = terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code",
"first_datasets # second page rv = client.get('/datasets?limit=2&page=1') assert rv.status_code ==",
"assert not json.loads(rv.data)['datasets'] # invalid page rv = client.get('/datasets?page=-1') assert",
"rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code == 200, rv.data img",
"rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code == 400",
"assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client, use_testdb, raster_file_xyz): import",
"# no stretch range rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' )",
"200 assert len(json.loads(rv.data)['datasets']) == 1 def test_get_datasets_unknown_key(client, use_testdb): rv =",
"'&stretch_range=[0,10000]' ) assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings = terracotta.get_settings() x, y, z",
"def test_get_spec(client): from terracotta import __version__ rv = client.get('/swagger.json') assert",
"# invalid expression rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' )",
"== 400 # no expression rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)'",
"test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client,",
"pytest @pytest.fixture(scope='module') def flask_app(): from terracotta.server import create_app return create_app()",
"200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client,",
"rv = client.get('/apidoc') assert rv.status_code == 200 assert b'Terracotta' in",
"invalid expression rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert",
"rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code == 400",
"rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"assert json.loads(rv.data) assert __version__ in rv.data.decode('utf-8') rv = client.get('/apidoc') assert",
"y, z = raster_file_xyz rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN') assert rv.status_code ==",
"terracotta.get_settings() rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img =",
"rv.status_code == 404 def test_get_datasets(client, use_testdb): rv = client.get('/datasets') assert",
"client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&' f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}') assert rv.status_code == 200, rv.data img = Image.open(BytesIO(rv.data))",
"= terracotta.get_settings() rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert",
"assert rv.status_code == 400 def test_get_colormap_extra_args(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz') assert",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?stretch_range=[0,10000)' ) assert rv.status_code == 400 # missing",
"import OrderedDict from PIL import Image import numpy as np",
"terracotta.get_settings() # default tile size x, y, z = raster_file_xyz",
") assert rv.status_code == 400 # invalid stretch range (syntax)",
"client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400 def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz): import",
"rv.status_code == 400 explicit_cmap[3] = 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}')",
"== (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb): import terracotta settings =",
"# default tile size x, y, z = raster_file_xyz rv",
"assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape ==",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo') assert rv.status_code == 400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz):",
"page out of range rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code ==",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz):",
"400 # invalid expression rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]'",
"== 200 assert json.loads(rv.data) assert __version__ in rv.data.decode('utf-8') rv =",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_cmap(client, use_testdb, raster_file_xyz): import terracotta",
"assert len(json.loads(rv.data)['colormap']) == 100 def test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert",
"400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"== 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit')",
"== 400 # invalid expression rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22'",
"def flask_app(): from terracotta.server import create_app return create_app() @pytest.fixture(scope='module') def",
"1 def test_get_datasets_unknown_key(client, use_testdb): rv = client.get('/datasets?UNKNOWN=val21') assert rv.status_code ==",
"rv = client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400 def test_get_singleband_greyscale(client, use_testdb,",
"20), 3: '#ffffff', 4: 'abcabc'} rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert",
"img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client,",
"rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map=foo') assert rv.status_code ==",
"np.asarray(img).shape == (128, 128) def test_get_compute_preview(client, use_testdb): import terracotta settings",
"'?expression=v1*v2&v1=val22&v2=val23' ) assert rv.status_code == 400 # no expression rv",
"assert rv.status_code == 400 # no expression rv = client.get(",
"assert rv.status_code == 200 assert ['extra_data'] == json.loads(rv.data)['metadata'] def test_get_metadata_nonexisting(client,",
"assert rv.status_code == 400 # invalid limit rv = client.get('/datasets?limit=-1')",
"('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert",
"test_get_compute_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv = client.get(",
"400 explicit_cmap[3] = 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code",
"assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client,",
"'description': 'key2'} ] assert rv.status_code == 200 assert expected_response ==",
"test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz explicit_cmap =",
"in datasets def test_get_datasets_pagination(client, use_testdb): # no page (implicit 0)",
"terracotta.get_settings() x, y, z = (0, 0, 10) rv =",
") assert rv.status_code == 400 # invalid stretch range (value)",
"assert rv.status_code == 200 assert len(json.loads(rv.data)['colormap']) == 100 def test_get_spec(client):",
"terracotta.server import create_app return create_app() @pytest.fixture(scope='module') def client(flask_app): with flask_app.test_client()",
"rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400 def test_get_colormap_extra_args(client): rv",
"raster_file_xyz explicit_cmap = {1: (0, 0, 0), 2: (255, 255,",
"json import urllib.parse from collections import OrderedDict from PIL import",
"0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'} rv",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?' f'explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet'",
"expected_response == json.loads(rv.data)['keys'] def test_get_metadata(client, use_testdb): rv = client.get('/metadata/val11/x/val12/') assert",
"few keys rv = client.get( f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings",
"100 def test_get_spec(client): from terracotta import __version__ rv = client.get('/swagger.json')",
"rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data))",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE assert np.all(np.asarray(img) == 0) def test_get_singleband_unknown_cmap(client, use_testdb,",
"(syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)' ) assert rv.status_code",
"def test_get_colormap(client): rv = client.get('/colormap?stretch_range=[0,1]&num_values=100') assert rv.status_code == 200 assert",
"x, y, z = raster_file_xyz # too few keys rv",
"== settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x, y, z =",
"settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x, y, z = raster_file_xyz",
"invalid stretch range (syntax) rv = client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000)'",
"rv.status_code == 200 datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets) ==",
"'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets def test_get_datasets_pagination(client, use_testdb):",
"len(datasets) == 4 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')])",
"rv = client.get('/datasets?limit=2&page=1000') assert rv.status_code == 200 assert not json.loads(rv.data)['datasets']",
"200 response = json.loads(rv.data, object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape",
"== settings.DEFAULT_TILE_SIZE def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings =",
"'&tile_size=[128,128]' ) assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"= client.get('/keys') expected_response = [ {'key': 'key1'}, {'key': 'akey'}, {'key':",
"expression rv = client.get( f'/compute/val21/x/preview.png' '?expression=__builtins__[\"dir\"](v1)&v1=val22' '&stretch_range=[0,10000]' ) assert rv.status_code",
"def test_get_colormap_invalid(client): rv = client.get('/colormap?stretch_range=[0,1') assert rv.status_code == 400 def",
"y, z = (0, 0, 10) rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert",
"assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) == 3 rv =",
"== settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings = terracotta.get_settings()",
"(0, 0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff',",
"rv.status_code == 200 assert json.loads(rv.data) assert __version__ in rv.data.decode('utf-8') rv",
"np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz): x, y, z",
"def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x,",
"(implicit 0) rv = client.get('/datasets?limit=2') assert rv.status_code == 200 response",
"= Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_stretch(client, use_testdb,",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 200 img =",
"assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client, use_testdb, raster_file_xyz): import",
"assert rv.status_code == 400 explicit_cmap[3] = 'omgomg' rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'",
"safe=r',.[]{}:\"') def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings()",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 200 img",
"rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) == 1 def test_get_datasets_unknown_key(client, use_testdb):",
"assert rv.status_code == 200 datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets'] assert len(datasets)",
"use_testdb): rv = client.get('/datasets?UNKNOWN=val21') assert rv.status_code == 400 def test_get_singleband_greyscale(client,",
"import urllib.parse from collections import OrderedDict from PIL import Image",
"= raster_file_xyz rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img",
"from terracotta.server import create_app return create_app() @pytest.fixture(scope='module') def client(flask_app): with",
"= client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 200",
"test_get_singleband_preview(client, use_testdb): import terracotta settings = terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet')",
"assert rv.status_code == 400 # invalid stretch range (value) rv",
"rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def test_get_singleband_explicit_cmap_invalid(client,",
"= client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 200",
"client.get( f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[10000,0]' ) assert rv.status_code == 400 def",
"= client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24') assert rv.status_code == 200 img = Image.open(BytesIO(rv.data)) assert",
"response['page'] == 0 first_datasets = response['datasets'] assert len(first_datasets) == 2",
"2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in",
"import Image import numpy as np import pytest @pytest.fixture(scope='module') def",
"second page rv = client.get('/datasets?limit=2&page=1') assert rv.status_code == 200 response",
"rv.status_code == 200 assert not json.loads(rv.data)['datasets'] # invalid page rv",
"page rv = client.get('/datasets?page=-1') assert rv.status_code == 400 # invalid",
"test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import terracotta settings = terracotta.get_settings() x, y,",
"@pytest.fixture(scope='module') def flask_app(): from terracotta.server import create_app return create_app() @pytest.fixture(scope='module')",
"expected_response = [ {'key': 'key1'}, {'key': 'akey'}, {'key': 'key2', 'description':",
"y, z = raster_file_xyz # too few keys rv =",
"import terracotta settings = terracotta.get_settings() x, y, z = (0,",
"'val12')]) not in last_datasets # page out of range rv",
"settings.DEFAULT_TILE_SIZE def test_get_singleband_out_of_bounds(client, use_testdb): import terracotta settings = terracotta.get_settings() x,",
"2 assert response['page'] == 1 last_datasets = response['datasets'] assert len(last_datasets)",
"255, 255)] rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400",
"== 400 # invalid limit rv = client.get('/datasets?limit=-1') assert rv.status_code",
"limit rv = client.get('/datasets?limit=-1') assert rv.status_code == 400 def test_get_datasets_selective(client,",
"client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert",
"== 0 first_datasets = response['datasets'] assert len(first_datasets) == 2 assert",
"'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets # page",
"use_testdb): import terracotta settings = terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert",
"= client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit') assert rv.status_code == 400 explicit_cmap[3] = 'omgomg' rv",
"(0, 0, 10) rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code == 200",
"= raster_file_xyz for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]',",
"f'/compute/val21/x/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' '&tile_size=[128,128]' ) assert rv.status_code == 200 img",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE # custom tile size rv",
"object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page'] == 0 first_datasets",
"f'/compute/val21/{z}/{x}/{y}.png' '?expression=v1*v2&v1=val22&v2=val23' '&stretch_range=[0,10000]' ) assert rv.status_code == 400 # invalid",
"f'&explicit_color_map=foo') assert rv.status_code == 400 def test_get_singleband_stretch(client, use_testdb, raster_file_xyz): import",
"404 def test_get_datasets(client, use_testdb): rv = client.get('/datasets') assert rv.status_code ==",
"assert response['limit'] == 2 assert response['page'] == 0 first_datasets =",
"== (*settings.DEFAULT_TILE_SIZE, 3) def test_get_compute(client, use_testdb, raster_file_xyz): import terracotta settings",
"== 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE def",
"== 2 assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in",
"'&stretch_range=[0,10000]' ) assert rv.status_code == 400 # invalid expression rv",
"raster_file_xyz): x, y, z = raster_file_xyz # too few keys",
"assert rv.status_code == 400 rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet' f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code",
"import terracotta settings = terracotta.get_settings() rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet') assert rv.status_code",
"assert response['page'] == 1 last_datasets = response['datasets'] assert len(last_datasets) ==",
"= client.get('/metadata/val11/x/NONEXISTING/') assert rv.status_code == 404 def test_get_datasets(client, use_testdb): rv",
"f'&explicit_color_map={urlsafe_json(explicit_cmap)}') assert rv.status_code == 200, rv.data.decode('utf-8') img = Image.open(BytesIO(rv.data)) assert",
"import terracotta settings = terracotta.get_settings() rv = client.get( f'/compute/val21/x/preview.png' '?expression=v1*v2&v1=val22&v2=val23'",
"= client.get('/datasets?key1=val21&key2=val23') assert rv.status_code == 200 assert len(json.loads(rv.data)['datasets']) == 1",
"= (0, 0, 10) rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png') assert rv.status_code ==",
"np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz): import terracotta",
"('akey', 'x'), ('key2', 'val12')]) in first_datasets # second page rv",
"== 200 assert len(json.loads(rv.data)['datasets']) == 1 def test_get_datasets_unknown_key(client, use_testdb): rv",
"object_pairs_hook=OrderedDict) assert response['limit'] == 2 assert response['page'] == 1 last_datasets",
"Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3) def test_get_rgb_preview(client, use_testdb): import",
"== 200 img = Image.open(BytesIO(rv.data)) assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)",
"response['limit'] == 2 assert response['page'] == 1 last_datasets = response['datasets']",
"'[null, 1]', '[null,null]', 'null'): rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}') assert rv.status_code ==",
"stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'): rv ="
] |
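The explicit-colormap tests above push a JSON payload through the query string via `urlsafe_json`. A minimal standalone sketch of what that encoding actually produces (the `cmap` payload below is a hypothetical example, not one of the test fixtures):

import json
import urllib.parse

# same helper as in the test module: JSON-encode the payload, then
# percent-encode everything except characters that are harmless
# inside a query value (, . [ ] { } : ")
def urlsafe_json(payload):
    payload_json = json.dumps(payload)
    return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:"')

cmap = {1: [0, 0, 0], 2: '#ffffff'}
print(urlsafe_json(cmap))
# quote_plus turns spaces into '+' and '#' into '%23', while the JSON
# punctuation listed in `safe` passes through untouched:
# {"1":+[0,+0,+0],+"2":+"%23ffffff"}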
# url.py
def base_conv(n, input_base=10, output_base=10):
    """ Converts a number n from base input_base to base output_base.

    The following symbols are used to represent numbers:
    0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ

    n can be an int if input_base <= 10, and a string otherwise.
    The result will be a string.
    """
    numbers = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    ## base 10 conversion
    n = str(n)
    size = len(n)
    baseten = 0
    for i in range(size):
        baseten += numbers.index(n[i]) * input_base ** (size - 1 - i)

    ## base output_base conversion
    # we search the biggest power m such that output_base^m <= baseten
    max_power = 0
    while output_base ** (max_power + 1) <= baseten:
        max_power += 1

    result = ""
    for i in range(max_power + 1):
        # integer division, so coeff can index into `numbers`
        coeff = baseten // (output_base ** (max_power - i))
        baseten -= coeff * (output_base ** (max_power - i))
        result += numbers[coeff]
    return result


if __name__ == "__main__":
    assert(base_conv(10) == "10")
    assert(base_conv(42) == "42")
    assert(base_conv(5673576) == "5673576")
    assert(base_conv(10, input_base=2) == "2")
    assert(base_conv(101010, input_base=2) == "42")
    assert(base_conv(43, input_base=10, output_base=2) == "101011")
    assert(base_conv(256**3 - 1, input_base=10, output_base=16) == "ffffff")
    assert(base_conv("d9bbb9d0ceabf", input_base=16, output_base=8) == "154673563503165277")
    assert(base_conv("154673563503165277", input_base=8, output_base=10) == "3830404793297599")
    assert(base_conv(0, input_base=3, output_base=50) == "0")
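Beyond the asserts above, base_conv can be cross-checked against Python's built-in int(s, base), which parses bases 2 through 36. A minimal sketch, restricted to that base range; it is illustrative and not part of url.py:

import random

for _ in range(100):
    value = random.randrange(10**6)
    base = random.randrange(2, 37)  # int() accepts bases 2..36
    # base_conv emits lowercase digits for values below 36, which int() parses
    assert int(base_conv(value, input_base=10, output_base=base), base) == value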
import logging
from typing import Dict, List, Optional

import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate, PhaseGate,
                                    RXGate, RYGate, RZGate, U1Gate, U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate, SGate,
                                                   TdgGate, TGate, ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass

logger = logging.getLogger(__name__)


class RemoveSmallRotations(TransformationPass):
    """Return a circuit with small rotation gates removed."""

    def __init__(self, epsilon: float = 0, modulo2pi=False):
        """Remove all small rotations from a circuit

        Args:
            epsilon: Threshold for rotation angle to be removed
            modulo2pi: If True, then rotations that are multiples of 2pi are removed as well
        """
        super().__init__()
        self.epsilon = epsilon
        self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
        self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
        self.mod2pi = modulo2pi

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the pass on `dag`.

        Args:
            dag: input dag.
        Returns:
            Output dag with small rotations removed
        """
        def modulo_2pi(x):
            x = float(x)
            return np.mod(x + np.pi, 2 * np.pi) - np.pi

        for node in dag.op_nodes():
            if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
                if node.op.is_parameterized():
                    # for parameterized gates we do not optimize
                    pass
                else:
                    phi = float(node.op.params[0])
                    if self.mod2pi:
                        phi = modulo_2pi(phi)
                    if np.abs(phi) <= self.epsilon:
                        dag.substitute_node_with_dag(node, self._empty_dag1)
            elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
                if node.op.is_parameterized():
                    # for parameterized gates we do not optimize
                    pass
                else:
                    phi = float(node.op.params[0])
                    if self.mod2pi:
                        phi = modulo_2pi(phi)
                    if np.abs(phi) <= self.epsilon:
                        dag.substitute_node_with_dag(node, self._empty_dag2)
        return dag


class RemoveDiagonalGatesAfterInput(TransformationPass):
    """Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.

    Transpiler pass to remove diagonal gates (like RZ, T, Z, etc) at the start of a
    circuit, including diagonal 2Q gates. Nodes after a reset are also included.
    """

    def run(self, dag):
        """Run the RemoveDiagonalGatesAfterInput pass on `dag`.

        Args:
            dag (DAGCircuit): the DAG to be optimized.
        Returns:
            DAGCircuit: the optimized DAG.
        """
        diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
        diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)

        def valid_predecessor(s):
            """ Return True if node is a valid predecessor for removal """
            if s.type == 'in':
                return True
            if s.type == "op" and isinstance(s.op, Reset):
                return True
            return False

        nodes_to_remove = set()
        for input_node in dag.input_map.values():
            try:
                successor = next(dag.quantum_successors(input_node))
            except StopIteration:
                continue
            if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
                nodes_to_remove.add(successor)
            if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
                predecessors = dag.quantum_predecessors(successor)
                if all(valid_predecessor(s) for s in predecessors):
                    nodes_to_remove.add(successor)

        for node_to_remove in nodes_to_remove:
            dag.remove_op_node(node_to_remove)

        return dag


class DecomposeU(TransformationPass):
    """ Decompose U gates into elementary rotations Rx, Ry, Rz

    The U gates are decomposed using the McKay decomposition.
    """

    def __init__(self, verbose=0):
        super().__init__()
        self._subdags = []
        self.verbose = verbose
        self.initial_layout = None

    def ugate_replacement_circuit(self, ugate):
        """ Return a replacement circuit of Rz/Rx rotations for a U gate """
        qc = QuantumCircuit(1)
        if isinstance(ugate, (U3Gate, UGate)):
            theta, phi, lam = ugate.params
            if theta == np.pi / 2:
                # a u2 gate
                qc.rz(lam - np.pi / 2, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(phi + np.pi / 2, 0)
            else:
                # from https://arxiv.org/pdf/1707.03429.pdf
                qc.rz(lam, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(theta + np.pi, 0)
                qc.rx(np.pi / 2, 0)
                qc.rz(phi + np.pi, 0)
        elif isinstance(ugate, U2Gate):
            phi, lam = ugate.params
            qc.rz(lam - np.pi / 2, 0)
            qc.rx(np.pi / 2, 0)
            qc.rz(phi + np.pi / 2, 0)
        elif isinstance(ugate, (U1Gate, PhaseGate)):
            lam, = ugate.params
            qc.rz(lam, 0)
        else:
            raise Exception(f'unknown gate type {ugate}')
        return qc

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the Decompose pass on `dag`.

        Args:
            dag: input DAG.
        Returns:
            Output DAG where ``U`` gates have been decomposed.
        """
        # walk through the DAG and expand each node if required
        for node in dag.op_nodes():
            if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
                subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
                dag.substitute_node_with_dag(node, subdag)
        return dag


class DecomposeCX(TransformationPass):
    """ Decompose CX into CZ and single-qubit rotations """

    def __init__(self, mode: str = 'ry'):
        super().__init__()
        self._subdags: List = []
        self.initial_layout = None
        self.gate = qiskit.circuit.library.CXGate

        self.decomposition = QuantumCircuit(2)
        if mode == 'ry':
            self.decomposition.ry(-np.pi / 2, 1)
            self.decomposition.cz(0, 1)
            self.decomposition.ry(np.pi / 2, 1)
        else:
            self.decomposition.h(1)
            self.decomposition.cz(0, 1)
            self.decomposition.h(1)

        self._dag = circuit_to_dag(self.decomposition)

    def run(self, dag: DAGCircuit) -> DAGCircuit:
        """Run the pass on `dag`.

        Args:
            dag: input dag.
        Returns:
            output dag where ``CX`` was expanded.
        """
        # walk through the DAG and expand each non-basis node
        for node in dag.op_nodes(self.gate):
            dag.substitute_node_with_dag(node, self._dag)
        return dag


class SequentialPass(TransformationPass):
    """Adds barriers between gates to make the circuit sequential."""

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)
        for node in dag.op_nodes():
            new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
            if node.name in ['barrier', 'measure']:
                continue
            new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
        return new_dag


class LinearTopologyParallelPass(TransformationPass):
    """Adds barriers to enforce a linear topology.

    The barriers are placed between gates such that no two-qubit gates are executed
    at the same time and only single-qubit gates on non-neighboring qubits can be
    executed in parallel. It assumes a linear topology."""

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)

        def add_barrier():
            new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])

        for ii, layer in enumerate(dag.layers()):
            gates_1q = []
            gates_2q = []
            other_gates = []
            for node in layer['graph'].op_nodes():
                if len(node.qargs) == 2:
                    gates_2q.append(node)
                elif len(node.qargs) == 1:
                    gates_1q.append(node)
                else:
                    logging.info(f'layer {ii}: other type of node {node}')
                    other_gates.append(node)

            # single-qubit gates on even and odd qubit indices can run in parallel
            even = []
            odd = []
            for node in gates_1q:
                if node.qargs[0].index % 2 == 0:
                    even.append(node)
                else:
                    odd.append(node)

            logging.info(
                f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')

            if len(even) > 0:
                for node in even:
                    new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    add_barrier()
            if len(odd) > 0:
                for node in odd:
                    new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    add_barrier()
            for node in gates_2q:
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    add_barrier()
            for node in other_gates:
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
                if not isinstance(node.op, Barrier):
                    add_barrier()

        return new_dag


class DelayPass(TransformationPass):
    """Adds delay gates when the qubits are idle.

    For every layer of the circuit it finds the gate that lasts the longest and
    applies appropriate delays on the other qubits.
    """

    def __init__(self, gate_durations: Dict, delay_quantum: Optional[float] = None):
        """
        Args:
            gate_durations: Gate durations in the units of dt
        """
        super().__init__()
        self.gate_durations = gate_durations
        self.delay_quantum = delay_quantum

    def add_delay_to_dag(self, duration, dag, qargs, cargs):
        if self.delay_quantum:
            number_of_delays = int(duration / self.delay_quantum)
            for ii in range(number_of_delays):
                dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
        else:
            dag.apply_operation_back(Delay(duration), qargs, cargs)

    @staticmethod
    def _determine_delay_target_qubits(dag, layer):
        """ Determine qubits in the specified layer which require a delay gate """
        partition = layer['partition']
        lst = list(dag.qubits)
        for el in partition:
            for q in el:
                if q in lst:
                    lst.remove(q)
        return lst

    def run(self, dag):
        new_dag = DAGCircuit()
        for qreg in dag.qregs.values():
            new_dag.add_qreg(qreg)
        for creg in dag.cregs.values():
            new_dag.add_creg(creg)

        for layer_idx, layer in enumerate(dag.layers()):
            max_duration = 0
            durations = {}
            for node in layer['graph'].op_nodes():
                if node.name in self.gate_durations:
                    max_duration = max(max_duration, self.gate_durations[node.name])
                    for q in node.qargs:
                        durations[q] = self.gate_durations[node.name]
                else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
                new_dag.apply_operation_back(node.op, node.qargs, node.cargs)

            partition = layer['partition']
            if len(partition) == 0:
                continue

            lst = DelayPass._determine_delay_target_qubits(dag, layer)
            logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
            for el in lst:
                logger.info(f'apply_operation_back: {[el]}')
                self.add_delay_to_dag(max_duration, new_dag, [el], [])
            for q in durations:
                if max_duration - durations[q] > 0:
                    self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])

        return new_dag
"are idle. For every layer of the circuit it finds",
"theta == np.pi/2: # a u2 gate qc.rz(lam - np.pi",
"creg in dag.cregs.values(): new_dag.add_creg(creg) for node in dag.op_nodes(): new_dag.apply_operation_back(node.op, node.qargs,",
"if isinstance(ugate, (U3Gate, UGate)): theta, phi, lam = ugate.params if",
"rotations Rx, Ry, Rz The U gates are decomposed using",
"duration for node {node.name}') new_dag.apply_operation_back(node.op, node.qargs, node.cargs) partition = layer['partition']",
"True return False if successor.type == \"op\" and isinstance(successor.op, diagonal_2q_gates):",
"delay_quantum def add_delay_to_dag(self, duration, dag, qargs, cargs): if self.delay_quantum: number_of_delays",
"from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.converters.circuit_to_dag import circuit_to_dag from qiskit.dagcircuit",
"= set() for input_node in (dag.input_map.values()): try: successor = next(dag.quantum_successors(input_node))",
"else: # from https://arxiv.org/pdf/1707.03429.pdf qc.rz(lam, 0) qc.rx(np.pi / 2, 0)",
"DAGCircuit) -> DAGCircuit: \"\"\"Run the pass on `dag`. Args: dag:",
"dag.remove_op_node(node_to_remove) return dag class DecomposeU(TransformationPass): \"\"\" Decompose U gates into",
"in ['barrier', 'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class",
"- np.pi for node in dag.op_nodes(): if isinstance(node.op, (PhaseGate, RXGate,",
"gate type {ugate}') return qc def run(self, dag: DAGCircuit) ->",
"{layer_idx}, could not find duration for node {node.name}') new_dag.apply_operation_back(node.op, node.qargs,",
"dag.op_nodes(): if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)): subdag =",
"node.qargs, node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) if",
"`dag`. Args: dag (DAGCircuit): the DAG to be optimized. Returns:",
"CZ and single qubit rotations \"\"\" def __init__(self, mode: str",
"U3Gate, UGate)): subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op)) dag.substitute_node_with_dag(node, subdag) return dag class",
"to enforce a linear topology The barrier are placed between",
"list(new_dag.qubits), []) if len(odd) > 0: for node in odd:",
"Returns: Output DAG where ``U`` gates have been decomposed. \"\"\"",
"Decompose U gates into elementary rotations Rx, Ry, Rz The",
"[] odd = [] for node in gates_1q: if node.qargs[0].index",
"qubit gates on non-neighboring qubits can be executed in parallel.",
"== 'in': return True if s.type == \"op\" and isinstance(s.op,",
"it finds the gate that lasts the longest and applies",
"= verbose self.initial_layout = None def ugate_replacement_circuit(self, ugate): qc =",
"dag where ``CX`` was expanded. \"\"\" # Walk through the",
"for q in el: if q in lst: lst.remove(q) return",
"return new_dag class DelayPass(TransformationPass): \"\"\"Adds delay gates when the qubits",
"= self.gate_durations[node.name] else: logger.info('layer {layer_idx}, could not find duration for",
"[]) if len(odd) > 0: for node in odd: new_dag.apply_operation_back(node.op,",
"for node in dag.op_nodes(): if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):",
"PhaseGate, RXGate, RYGate, RZGate, U1Gate, U2Gate, U3Gate, UGate) from qiskit.circuit.library.standard_gates",
"logger.info('SequentialPass: adding node {node.name}') if node.name in ['barrier', 'measure']: continue",
"cargs) @staticmethod def _determine_delay_target_qubits(dag, layer): \"\"\" Determine qubits in specified",
"isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) if len(odd) > 0: for",
"elif isinstance(ugate, (U1Gate, PhaseGate)): lam, = ugate.params qc.rz(lam, 0) else:",
"node in layer['graph'].op_nodes(): if len(node.qargs) == 2: gates_2q.append(node) elif len(node.qargs)",
"self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1)) self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi def",
"delay_quantum: Optional[float] = None): \"\"\" Args: gate_durations: Gate durations in",
"> 0: self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], []) return new_dag",
"= gate_durations self.delay_quantum = delay_quantum def add_delay_to_dag(self, duration, dag, qargs,",
"lst def run(self, dag): new_dag = DAGCircuit() for qreg in",
"dag (DAGCircuit): the DAG to be optimized. Returns: DAGCircuit: the",
"import qiskit from qiskit.circuit import Barrier, Delay, Reset from qiskit.circuit.library",
"expand each non-basis node for node in dag.op_nodes(self.gate): dag.substitute_node_with_dag(node, self._dag)",
"odd {len(odd)}, other {len(other_gates)}') if len(even) > 0: for node",
"# Walk through the DAG and expand each non-basis node",
"gate_durations self.delay_quantum = delay_quantum def add_delay_to_dag(self, duration, dag, qargs, cargs):",
"partition: for q in el: if q in lst: lst.remove(q)",
"= max(max_duration, self.gate_durations[node.name]) for q in node.qargs: durations[q] = self.gate_durations[node.name]",
"for creg in dag.cregs.values(): new_dag.add_creg(creg) for ii, layer in enumerate(dag.layers()):",
"list(new_dag.qubits), []) return new_dag class DelayPass(TransformationPass): \"\"\"Adds delay gates when",
"`dag`. Args: dag: input dag. Returns: Output dag with small",
"__init__(self, mode: str = 'ry'): \"\"\" Args: \"\"\" super().__init__() self._subdags:",
"on `dag`. Args: dag: input dag. Returns: Output dag with",
"False if successor.type == \"op\" and isinstance(successor.op, diagonal_2q_gates): predecessors =",
"+ np.pi, 0) elif isinstance(ugate, U2Gate): phi, lam = ugate.params",
"self.mod2pi: phi = modulo_2pi(phi) if np.abs(phi) <= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag2)",
"qc.rz(phi + np.pi, 0) elif isinstance(ugate, U2Gate): phi, lam =",
"dag.cregs.values(): new_dag.add_creg(creg) for node in dag.op_nodes(): new_dag.apply_operation_back(node.op, node.qargs, node.cargs) logger.info('SequentialPass:",
"idle. For every layer of the circuit it finds the",
"\"\"\" partition = layer['partition'] lst = list(dag.qubits) for el in",
"x = float(x) return np.mod(x + np.pi, 2 * np.pi)",
"specified layer which require a delay gate \"\"\" partition =",
"node in dag.op_nodes(): if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):",
"\"op\" and isinstance(successor.op, diagonal_2q_gates): predecessors = dag.quantum_predecessors(successor) if all(valid_predecessor(s) for",
"through the DAG and expand each node if required for",
"2Q gates) at the start of a circuit. Transpiler pass",
"successor.type == \"op\" and isinstance(successor.op, diagonal_2q_gates): predecessors = dag.quantum_predecessors(successor) if",
"gates (like RZ, T, Z, etc) at the start of",
"self.initial_layout = None def ugate_replacement_circuit(self, ugate): qc = QuantumCircuit(1) if",
"__init__(self, epsilon: float = 0, modulo2pi=False): \"\"\"Remove all small rotations",
"for removal \"\"\" if s.type == 'in': return True if",
"in nodes_to_remove: dag.remove_op_node(node_to_remove) return dag class DecomposeU(TransformationPass): \"\"\" Decompose U",
"qc def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the Decompose",
"float], delay_quantum: Optional[float] = None): \"\"\" Args: gate_durations: Gate durations",
"gates_2q.append(node) elif len(node.qargs) == 1: gates_1q.append(node) else: logging.info(f'layer {ii}: other",
"2, 0) qc.rx(np.pi / 2, 0) qc.rz(phi + np.pi /",
"Rz The U gates are decomposed using McKay decomposition. \"\"\"",
"CRZGate, CZGate, PhaseGate, RXGate, RYGate, RZGate, U1Gate, U2Gate, U3Gate, UGate)",
"in odd: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()),",
"def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None): \"\"\"",
"dag.qregs.values(): new_dag.add_qreg(qreg) for creg in dag.cregs.values(): new_dag.add_creg(creg) for node in",
"Dict[str, float], delay_quantum: Optional[float] = None): \"\"\" Args: gate_durations: Gate",
"TGate, ZGate) from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.converters.circuit_to_dag import circuit_to_dag",
"in self.gate_durations: max_duration = max(max_duration, self.gate_durations[node.name]) for q in node.qargs:",
"where ``CX`` was expanded. \"\"\" # Walk through the DAG",
"phi, lam = ugate.params qc.rz(lam - np.pi / 2, 0)",
"of node {node}') other_gates.append(node) even = [] odd = []",
"RYGate, RZGate)): if node.op.is_parameterized(): # for parameterized gates we do",
"= modulo2pi def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the",
"dag.cregs.values(): new_dag.add_creg(creg) for ii, layer in enumerate(dag.layers()): gates_1q = []",
"[]) for q in durations: if max_duration - durations[q] >",
"Output DAG where ``U`` gates have been decomposed. \"\"\" #",
"DelayPass._determine_delay_target_qubits(dag, layer) logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}') for el",
"pass on `dag`. Args: dag: input DAG. Returns: Output DAG",
"for el in partition: for q in el: if q",
"for s in predecessors): nodes_to_remove.add(successor) for node_to_remove in nodes_to_remove: dag.remove_op_node(node_to_remove)",
"self.decomposition.h(1) self.decomposition.cz(0, 1) self.decomposition.h(1) self._dag = circuit_to_dag(self.decomposition) def run(self, dag:",
"qc.rx(np.pi / 2, 0) qc.rz(phi + np.pi / 2, 0)",
"DAG. Returns: Output DAG where ``U`` gates have been decomposed.",
"2, 1) self.decomposition.cz(0, 1) self.decomposition.ry(np.pi / 2, 1) else: self.decomposition.h(1)",
"self.mod2pi: phi = modulo_2pi(phi) if np.abs(phi) <= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag1)",
"return new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce a linear",
"are decomposed using McKay decomposition. \"\"\" def __init__(self, verbose=0): \"\"\"",
"assumes a linear topology.\"\"\" def run(self, dag): new_dag = DAGCircuit()",
"gates on non-neighboring qubits can be executed in parallel. It",
"circuit. Including diagonal 2Q gates. Nodes after a reset are",
"2, 0) qc.rz(theta + np.pi, 0) qc.rx(np.pi / 2, 0)",
"layer_idx, layer in enumerate(dag.layers()): max_duration = 0 durations = {}",
"0: continue lst = DelayPass._determine_delay_target_qubits(dag, layer) logger.info(f'layer: {layer_idx}: lst {lst},",
"def __init__(self, mode: str = 'ry'): \"\"\" Args: \"\"\" super().__init__()",
"(CU1Gate, RZZGate, SdgGate, SGate, TdgGate, TGate, ZGate) from qiskit.circuit.quantumcircuit import",
"return False if successor.type == \"op\" and isinstance(successor.op, diagonal_2q_gates): predecessors",
"gates to make the circuit sequential.\"\"\" def run(self, dag): new_dag",
"circuit_to_dag(self.decomposition) def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the Decompose",
"{ii}: other type of node {node}') other_gates.append(node) even = []",
"[el], []) for q in durations: if max_duration - durations[q]",
"removed.\"\"\" def __init__(self, epsilon: float = 0, modulo2pi=False): \"\"\"Remove all",
"of a circuit. Transpiler pass to remove diagonal gates (like",
"= circuit_to_dag(self.decomposition) def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the",
"at the same time and only single qubit gates on",
"'ry'): \"\"\" Args: \"\"\" super().__init__() self._subdags: List = [] self.initial_layout",
"from qiskit.transpiler.basepasses import TransformationPass logger = logging.getLogger(__name__) class RemoveSmallRotations(TransformationPass): \"\"\"Return",
"rotations multiples of 2pi are removed as well \"\"\" super().__init__()",
"the other qubits. \"\"\" def __init__(self, gate_durations: Dict[str, float], delay_quantum:",
"durations = {} for node in layer['graph'].op_nodes(): if node.name in",
"in lst: lst.remove(q) return lst def run(self, dag): new_dag =",
"\"\"\"Adds barriers to enforce a linear topology The barrier are",
"2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}') if",
"epsilon: float = 0, modulo2pi=False): \"\"\"Remove all small rotations from",
"Optional import numpy as np import qiskit from qiskit.circuit import",
"on the other qubits. \"\"\" def __init__(self, gate_durations: Dict[str, float],",
"0 durations = {} for node in layer['graph'].op_nodes(): if node.name",
"Nodes after a reset are also included. \"\"\" def run(self,",
"1) self.decomposition.ry(np.pi / 2, 1) else: self.decomposition.h(1) self.decomposition.cz(0, 1) self.decomposition.h(1)",
"circuit. Transpiler pass to remove diagonal gates (like RZ, T,",
"self.gate = qiskit.circuit.library.CXGate self.decomposition = QuantumCircuit(2) if mode == 'ry':",
"dag.substitute_node_with_dag(node, self._empty_dag1) elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)): if node.op.is_parameterized(): #",
"qubits. \"\"\" def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] =",
"= delay_quantum def add_delay_to_dag(self, duration, dag, qargs, cargs): if self.delay_quantum:",
"\"\"\" Decompose CX into CZ and single qubit rotations \"\"\"",
"return lst def run(self, dag): new_dag = DAGCircuit() for qreg",
"enumerate(dag.layers()): max_duration = 0 durations = {} for node in",
"for node in layer['graph'].op_nodes(): if len(node.qargs) == 2: gates_2q.append(node) elif",
"in dag.cregs.values(): new_dag.add_creg(creg) for ii, layer in enumerate(dag.layers()): gates_1q =",
"U1Gate) diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate) nodes_to_remove = set()",
"U2Gate): phi, lam = ugate.params qc.rz(lam - np.pi / 2,",
"{node}') other_gates.append(node) even = [] odd = [] for node",
"https://arxiv.org/pdf/1707.03429.pdf qc.rz(lam, 0) qc.rx(np.pi / 2, 0) qc.rz(theta + np.pi,",
"isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)): if node.op.is_parameterized(): # for parameterized",
"elif isinstance(ugate, U2Gate): phi, lam = ugate.params qc.rz(lam - np.pi",
"predecessors = dag.quantum_predecessors(successor) if all(valid_predecessor(s) for s in predecessors): nodes_to_remove.add(successor)",
"barriers between gates to make the circuit sequential.\"\"\" def run(self,",
"can be executed in parallel. It assumes a linear topology.\"\"\"",
"CU1Gate, RZZGate) nodes_to_remove = set() for input_node in (dag.input_map.values()): try:",
"node in gates_2q: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier):",
"= [] odd = [] for node in gates_1q: if",
"in layer['graph'].op_nodes(): if node.name in self.gate_durations: max_duration = max(max_duration, self.gate_durations[node.name])",
"dag): new_dag = DAGCircuit() for qreg in dag.qregs.values(): new_dag.add_qreg(qreg) for",
"in enumerate(dag.layers()): max_duration = 0 durations = {} for node",
"if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)): if node.op.is_parameterized(): # for",
"as well \"\"\" super().__init__() self.epsilon = epsilon self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))",
"+ np.pi / 2, 0) else: # from https://arxiv.org/pdf/1707.03429.pdf qc.rz(lam,",
"diagonal 2Q gates) at the start of a circuit. Transpiler",
"nodes_to_remove: dag.remove_op_node(node_to_remove) return dag class DecomposeU(TransformationPass): \"\"\" Decompose U gates",
"\"\"\" def __init__(self, verbose=0): \"\"\" Args: \"\"\" super().__init__() self._subdags =",
"Walk through the DAG and expand each non-basis node for",
"node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag",
"np.pi) - np.pi for node in dag.op_nodes(): if isinstance(node.op, (PhaseGate,",
"<= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag2) return dag class RemoveDiagonalGatesAfterInput(TransformationPass): \"\"\"Remove diagonal",
"continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers",
"_determine_delay_target_qubits(dag, layer): \"\"\" Determine qubits in specified layer which require",
"creg in dag.cregs.values(): new_dag.add_creg(creg) for layer_idx, layer in enumerate(dag.layers()): max_duration",
"layer in enumerate(dag.layers()): max_duration = 0 durations = {} for",
"could not find duration for node {node.name}') new_dag.apply_operation_back(node.op, node.qargs, node.cargs)",
"import QuantumCircuit from qiskit.converters.circuit_to_dag import circuit_to_dag from qiskit.dagcircuit import DAGCircuit",
"dag class DecomposeU(TransformationPass): \"\"\" Decompose U gates into elementary rotations",
"elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)): if node.op.is_parameterized(): # for parameterized",
"import TransformationPass logger = logging.getLogger(__name__) class RemoveSmallRotations(TransformationPass): \"\"\"Return a circuit",
"lam, = ugate.params qc.rz(lam, 0) else: raise Exception(f'unknown gate type",
"gate qc.rz(lam - np.pi / 2, 0) qc.rx(np.pi / 2,",
"require a delay gate \"\"\" partition = layer['partition'] lst =",
"__init__(self, verbose=0): \"\"\" Args: \"\"\" super().__init__() self._subdags = [] self.verbose",
"SGate, TdgGate, SdgGate, U1Gate) diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)",
"dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the Decompose pass on `dag`.",
"not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class DelayPass(TransformationPass):",
"Reset from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate, PhaseGate, RXGate,",
"as np import qiskit from qiskit.circuit import Barrier, Delay, Reset",
"phi = modulo_2pi(phi) if np.abs(phi) <= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag1) elif",
"for layer_idx, layer in enumerate(dag.layers()): max_duration = 0 durations =",
"dag: input dag. Returns: Output dag with small rotations removed",
"isinstance(s.op, Reset): return True return False if successor.type == \"op\"",
"layer['partition'] if len(partition) == 0: continue lst = DelayPass._determine_delay_target_qubits(dag, layer)",
"other {len(other_gates)}') if len(even) > 0: for node in even:",
"1) self.decomposition.cz(0, 1) self.decomposition.ry(np.pi / 2, 1) else: self.decomposition.h(1) self.decomposition.cz(0,",
"predecessors): nodes_to_remove.add(successor) for node_to_remove in nodes_to_remove: dag.remove_op_node(node_to_remove) return dag class",
"are also included. \"\"\" def run(self, dag): \"\"\"Run the RemoveDiagonalGatesBeforeMeasure",
"\"\"\" # Walk through the DAG and expand each non-basis",
"dag class SequentialPass(TransformationPass): \"\"\"Adds barriers between gates to make the",
"even.append(node) else: odd.append(node) logging.info( f'layer {ii}: 2q gates {len(gates_2q)}, even",
"dag, qargs, cargs): if self.delay_quantum: number_of_delays = int(duration/self.delay_quantum) for ii",
"make the circuit sequential.\"\"\" def run(self, dag): new_dag = DAGCircuit()",
"qc.rx(np.pi / 2, 0) qc.rz(theta + np.pi, 0) qc.rx(np.pi /",
"else: phi = float(node.op.params[0]) if self.mod2pi: phi = modulo_2pi(phi) if",
"isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node in other_gates: new_dag.apply_operation_back(node.op,",
"return dag class DecomposeU(TransformationPass): \"\"\" Decompose U gates into elementary",
"dag class RemoveDiagonalGatesAfterInput(TransformationPass): \"\"\"Remove diagonal gates (including diagonal 2Q gates)",
"is valid predecessor for removal \"\"\" if s.type == 'in':",
"``U`` gates have been decomposed. \"\"\" # Walk through the",
"of the circuit it finds the gate that lasts the",
"in el: if q in lst: lst.remove(q) return lst def",
"qiskit.circuit.library.CXGate self.decomposition = QuantumCircuit(2) if mode == 'ry': self.decomposition.ry(-np.pi /",
"small rotations from a circuit Args: epsilon: Threshold for rotation",
"topology.\"\"\" def run(self, dag): new_dag = DAGCircuit() for qreg in",
"* np.pi) - np.pi for node in dag.op_nodes(): if isinstance(node.op,",
"type {ugate}') return qc def run(self, dag: DAGCircuit) -> DAGCircuit:",
"node in odd: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier):",
"not find duration for node {node.name}') new_dag.apply_operation_back(node.op, node.qargs, node.cargs) partition",
"\"\"\"Remove diagonal gates (including diagonal 2Q gates) at the start",
"durations: if max_duration - durations[q] > 0: self.add_delay_to_dag(max_duration - durations[q],",
"q in el: if q in lst: lst.remove(q) return lst",
"{[el]}') self.add_delay_to_dag(max_duration, new_dag, [el], []) for q in durations: if",
"add_delay_to_dag(self, duration, dag, qargs, cargs): if self.delay_quantum: number_of_delays = int(duration/self.delay_quantum)",
"qiskit.converters.circuit_to_dag(QuantumCircuit(1)) self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi def run(self, dag:",
"/ 2, 0) qc.rx(np.pi / 2, 0) qc.rz(phi + np.pi",
"to make the circuit sequential.\"\"\" def run(self, dag): new_dag =",
"Return True of node is valid predecessor for removal \"\"\"",
"if len(odd) > 0: for node in odd: new_dag.apply_operation_back(node.op, node.qargs,",
"U1Gate, U2Gate, U3Gate, UGate)): subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op)) dag.substitute_node_with_dag(node, subdag) return",
"creg in dag.cregs.values(): new_dag.add_creg(creg) for ii, layer in enumerate(dag.layers()): gates_1q",
"class DecomposeCX(TransformationPass): \"\"\" Decompose CX into CZ and single qubit",
"McKay decomposition. \"\"\" def __init__(self, verbose=0): \"\"\" Args: \"\"\" super().__init__()",
"el in lst: logger.info(f'apply_operation_back: {[el]}') self.add_delay_to_dag(max_duration, new_dag, [el], []) for",
"odd: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits),",
"do not optimize pass else: phi = float(node.op.params[0]) if self.mod2pi:",
"if len(partition) == 0: continue lst = DelayPass._determine_delay_target_qubits(dag, layer) logger.info(f'layer:",
"qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi def run(self, dag: DAGCircuit) -> DAGCircuit:",
"durations[q] > 0: self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], []) return",
"= DAGCircuit() for qreg in dag.qregs.values(): new_dag.add_qreg(qreg) for creg in",
"odd = [] for node in gates_1q: if node.qargs[0].index %",
"= [] for node in gates_1q: if node.qargs[0].index % 2",
"import Barrier, Delay, Reset from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate,",
"expand each node if required for node in dag.op_nodes(): if",
"adding node {node.name}') if node.name in ['barrier', 'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()),",
"qargs, cargs) @staticmethod def _determine_delay_target_qubits(dag, layer): \"\"\" Determine qubits in",
"gate_durations: Gate durations in the units of dt \"\"\" super().__init__()",
"DAGCircuit) -> DAGCircuit: \"\"\"Run the Decompose pass on `dag`. Args:",
"in node.qargs: durations[q] = self.gate_durations[node.name] else: logger.info('layer {layer_idx}, could not",
"durations {durations}') for el in lst: logger.info(f'apply_operation_back: {[el]}') self.add_delay_to_dag(max_duration, new_dag,",
"and single qubit rotations \"\"\" def __init__(self, mode: str =",
"def modulo_2pi(x): x = float(x) return np.mod(x + np.pi, 2",
"qc.rx(np.pi / 2, 0) qc.rz(phi + np.pi, 0) elif isinstance(ugate,",
"not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node in gates_2q:",
"DAG. \"\"\" diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate,",
"gates we do not optimize pass else: phi = float(node.op.params[0])",
"= [] other_gates = [] for node in layer['graph'].op_nodes(): if",
"for node in odd: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op,",
"lst.remove(q) return lst def run(self, dag): new_dag = DAGCircuit() for",
"new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])",
"circuit it finds the gate that lasts the longest and",
"The U gates are decomposed using McKay decomposition. \"\"\" def",
"\"\"\" Determine qubits in specified layer which require a delay",
"the RemoveDiagonalGatesBeforeMeasure pass on `dag`. Args: dag (DAGCircuit): the DAG",
"barrier are placed between gates such that no two qubit",
"if s.type == \"op\" and isinstance(s.op, Reset): return True return",
"class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce a linear topology The",
"node.op.is_parameterized(): # for parameterized gates we do not optimize pass",
"from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate, SGate, TdgGate, TGate, ZGate)",
"'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds",
"typing import Dict, List, Optional import numpy as np import",
"if all(valid_predecessor(s) for s in predecessors): nodes_to_remove.add(successor) for node_to_remove in",
"isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class DelayPass(TransformationPass): \"\"\"Adds",
"dag): \"\"\"Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`. Args: dag (DAGCircuit):",
"== \"op\" and isinstance(successor.op, diagonal_2q_gates): predecessors = dag.quantum_predecessors(successor) if all(valid_predecessor(s)",
"self.decomposition.cz(0, 1) self.decomposition.ry(np.pi / 2, 1) else: self.decomposition.h(1) self.decomposition.cz(0, 1)",
"if successor.type == \"op\" and isinstance(successor.op, diagonal_1q_gates): nodes_to_remove.add(successor) def valid_predecessor(s):",
"rotation angle to be removed modulo2pi: If True, then rotations",
"start of a circuit. Including diagonal 2Q gates. Nodes after",
"\"\"\" # Walk through the DAG and expand each node",
"for q in durations: if max_duration - durations[q] > 0:",
"self._dag = circuit_to_dag(self.decomposition) def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run",
"> 0: for node in odd: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if",
"= 0 durations = {} for node in layer['graph'].op_nodes(): if",
"0) qc.rz(phi + np.pi / 2, 0) else: # from",
"modulo_2pi(phi) if np.abs(phi) <= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag2) return dag class",
"that no two qubit gates are executed at the same",
"import (CRXGate, CRYGate, CRZGate, CZGate, PhaseGate, RXGate, RYGate, RZGate, U1Gate,",
"def run(self, dag): \"\"\"Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`. Args:",
"U gates are decomposed using McKay decomposition. \"\"\" def __init__(self,",
"node.cargs) logger.info('SequentialPass: adding node {node.name}') if node.name in ['barrier', 'measure']:",
"if node.name in self.gate_durations: max_duration = max(max_duration, self.gate_durations[node.name]) for q",
"0) qc.rx(np.pi / 2, 0) qc.rz(phi + np.pi / 2,",
"rotations \"\"\" def __init__(self, mode: str = 'ry'): \"\"\" Args:",
"and expand each non-basis node for node in dag.op_nodes(self.gate): dag.substitute_node_with_dag(node,",
"el: if q in lst: lst.remove(q) return lst def run(self,",
"rotations from a circuit Args: epsilon: Threshold for rotation angle",
"self.decomposition.ry(np.pi / 2, 1) else: self.decomposition.h(1) self.decomposition.cz(0, 1) self.decomposition.h(1) self._dag",
"The barrier are placed between gates such that no two",
"\"\"\"Adds delay gates when the qubits are idle. For every",
"for ii in range(number_of_delays): dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs) else: dag.apply_operation_back(Delay(duration), qargs,",
"RZZGate) nodes_to_remove = set() for input_node in (dag.input_map.values()): try: successor",
"- np.pi / 2, 0) qc.rx(np.pi / 2, 0) qc.rz(phi",
"angle to be removed modulo2pi: If True, then rotations multiples",
"return np.mod(x + np.pi, 2 * np.pi) - np.pi for",
"'in': return True if s.type == \"op\" and isinstance(s.op, Reset):",
"\"\"\" Args: \"\"\" super().__init__() self._subdags = [] self.verbose = verbose",
"have been decomposed. \"\"\" # Walk through the DAG and",
"are executed at the same time and only single qubit",
"= [] gates_2q = [] other_gates = [] for node",
"that lasts the longest and applies appropriate delays on the",
"lam = ugate.params if theta == np.pi/2: # a u2",
"in durations: if max_duration - durations[q] > 0: self.add_delay_to_dag(max_duration -",
"Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node in gates_2q: new_dag.apply_operation_back(node.op, node.qargs,",
"try: successor = next(dag.quantum_successors(input_node)) except StopIteration: continue if successor.type ==",
"List, Optional import numpy as np import qiskit from qiskit.circuit",
"isinstance(successor.op, diagonal_2q_gates): predecessors = dag.quantum_predecessors(successor) if all(valid_predecessor(s) for s in",
"other_gates.append(node) even = [] odd = [] for node in",
"s.type == \"op\" and isinstance(s.op, Reset): return True return False",
"epsilon self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1)) self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi",
"next(dag.quantum_successors(input_node)) except StopIteration: continue if successor.type == \"op\" and isinstance(successor.op,",
"layer['graph'].op_nodes(): if node.name in self.gate_durations: max_duration = max(max_duration, self.gate_durations[node.name]) for",
"a linear topology The barrier are placed between gates such",
"RemoveDiagonalGatesBeforeMeasure pass on `dag`. Args: dag (DAGCircuit): the DAG to",
"barriers to enforce a linear topology The barrier are placed",
"in partition: for q in el: if q in lst:",
"to be removed modulo2pi: If True, then rotations multiples of",
"\"\"\"Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`. Args: dag (DAGCircuit): the",
"gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}') if len(even)",
"gates_1q = [] gates_2q = [] other_gates = [] for",
"float(x) return np.mod(x + np.pi, 2 * np.pi) - np.pi",
"valid_predecessor(s): \"\"\" Return True of node is valid predecessor for",
"None): \"\"\" Args: gate_durations: Gate durations in the units of",
"{node.name}') new_dag.apply_operation_back(node.op, node.qargs, node.cargs) partition = layer['partition'] if len(partition) ==",
"epsilon: Threshold for rotation angle to be removed modulo2pi: If",
"also included. \"\"\" def run(self, dag): \"\"\"Run the RemoveDiagonalGatesBeforeMeasure pass",
"even {len(even)} odd {len(odd)}, other {len(other_gates)}') if len(even) > 0:",
"SdgGate, SGate, TdgGate, TGate, ZGate) from qiskit.circuit.quantumcircuit import QuantumCircuit from",
"phi = float(node.op.params[0]) if self.mod2pi: phi = modulo_2pi(phi) if np.abs(phi)",
"verbose=0): \"\"\" Args: \"\"\" super().__init__() self._subdags = [] self.verbose =",
"node in layer['graph'].op_nodes(): if node.name in self.gate_durations: max_duration = max(max_duration,",
"= QuantumCircuit(2) if mode == 'ry': self.decomposition.ry(-np.pi / 2, 1)",
"parallel. It assumes a linear topology.\"\"\" def run(self, dag): new_dag",
"U gates into elementary rotations Rx, Ry, Rz The U",
"def run(self, dag): new_dag = DAGCircuit() for qreg in dag.qregs.values():",
"[]) return new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce a",
"Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node in other_gates: new_dag.apply_operation_back(node.op, node.qargs,",
"['barrier', 'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class LinearTopologyParallelPass(TransformationPass):",
"optimized. Returns: DAGCircuit: the optimized DAG. \"\"\" diagonal_1q_gates = (RZGate,",
"in gates_2q: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()),",
"qreg in dag.qregs.values(): new_dag.add_qreg(qreg) for creg in dag.cregs.values(): new_dag.add_creg(creg) for",
"RXGate, RYGate, RZGate, U1Gate, U2Gate, U3Gate, UGate) from qiskit.circuit.library.standard_gates import",
"self.initial_layout = None self.gate = qiskit.circuit.library.CXGate self.decomposition = QuantumCircuit(2) if",
"__init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None): \"\"\" Args:",
"node.cargs) if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node",
"Gate durations in the units of dt \"\"\" super().__init__() self.gate_durations",
"self.decomposition.h(1) self._dag = circuit_to_dag(self.decomposition) def run(self, dag: DAGCircuit) -> DAGCircuit:",
"if s.type == 'in': return True if s.type == \"op\"",
"Output dag with small rotations removed \"\"\" def modulo_2pi(x): x",
"gates (including diagonal 2Q gates) at the start of a",
"'ry': self.decomposition.ry(-np.pi / 2, 1) self.decomposition.cz(0, 1) self.decomposition.ry(np.pi / 2,",
"for ii, layer in enumerate(dag.layers()): gates_1q = [] gates_2q =",
"raise Exception(f'unknown gate type {ugate}') return qc def run(self, dag:",
"placed between gates such that no two qubit gates are",
"the start of a circuit. Including diagonal 2Q gates. Nodes",
"Args: dag: input DAG. Returns: Output DAG where ``U`` gates",
"for node in gates_1q: if node.qargs[0].index % 2 == 0:",
"to be optimized. Returns: DAGCircuit: the optimized DAG. \"\"\" diagonal_1q_gates",
"subdag) return dag class DecomposeCX(TransformationPass): \"\"\" Decompose CX into CZ",
"removed as well \"\"\" super().__init__() self.epsilon = epsilon self._empty_dag1 =",
"in dag.cregs.values(): new_dag.add_creg(creg) for node in dag.op_nodes(): new_dag.apply_operation_back(node.op, node.qargs, node.cargs)",
"TransformationPass logger = logging.getLogger(__name__) class RemoveSmallRotations(TransformationPass): \"\"\"Return a circuit with",
"input DAG. Returns: Output DAG where ``U`` gates have been",
"== 0: even.append(node) else: odd.append(node) logging.info( f'layer {ii}: 2q gates",
"for node in other_gates: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op,",
"longest and applies appropriate delays on the other qubits. \"\"\"",
"other qubits. \"\"\" def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float]",
"def _determine_delay_target_qubits(dag, layer): \"\"\" Determine qubits in specified layer which",
"a circuit with small rotation gates removed.\"\"\" def __init__(self, epsilon:",
"node.cargs) partition = layer['partition'] if len(partition) == 0: continue lst",
"on `dag`. Args: dag: input dag. Returns: output dag where",
"the gate that lasts the longest and applies appropriate delays",
"partition = layer['partition'] lst = list(dag.qubits) for el in partition:",
"CRYGate, CRZGate, CZGate, PhaseGate, RXGate, RYGate, RZGate, U1Gate, U2Gate, U3Gate,",
"isinstance(ugate, (U3Gate, UGate)): theta, phi, lam = ugate.params if theta",
"optimize pass else: phi = float(node.op.params[0]) if self.mod2pi: phi =",
"dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the pass on `dag`. Args:",
"-> DAGCircuit: \"\"\"Run the pass on `dag`. Args: dag: input",
"else: raise Exception(f'unknown gate type {ugate}') return qc def run(self,",
"new_dag.add_qreg(qreg) for creg in dag.cregs.values(): new_dag.add_creg(creg) for layer_idx, layer in",
"Args: epsilon: Threshold for rotation angle to be removed modulo2pi:",
"self.decomposition.ry(-np.pi / 2, 1) self.decomposition.cz(0, 1) self.decomposition.ry(np.pi / 2, 1)",
"class DelayPass(TransformationPass): \"\"\"Adds delay gates when the qubits are idle.",
"Determine qubits in specified layer which require a delay gate",
"RZ, T, Z, etc) at the start of a circuit.",
"dag class DecomposeCX(TransformationPass): \"\"\" Decompose CX into CZ and single",
"+ np.pi / 2, 0) elif isinstance(ugate, (U1Gate, PhaseGate)): lam,",
"for node in layer['graph'].op_nodes(): if node.name in self.gate_durations: max_duration =",
"super().__init__() self.epsilon = epsilon self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1)) self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))",
"RemoveDiagonalGatesAfterInput(TransformationPass): \"\"\"Remove diagonal gates (including diagonal 2Q gates) at the",
"node {node.name}') if node.name in ['barrier', 'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits),",
"on `dag`. Args: dag: input DAG. Returns: Output DAG where",
"successor.type == \"op\" and isinstance(successor.op, diagonal_1q_gates): nodes_to_remove.add(successor) def valid_predecessor(s): \"\"\"",
"layer['partition'] lst = list(dag.qubits) for el in partition: for q",
"self.gate_durations[node.name]) for q in node.qargs: durations[q] = self.gate_durations[node.name] else: logger.info('layer",
"np.pi, 2 * np.pi) - np.pi for node in dag.op_nodes():",
"el in partition: for q in el: if q in",
"in lst: logger.info(f'apply_operation_back: {[el]}') self.add_delay_to_dag(max_duration, new_dag, [el], []) for q",
"2: gates_2q.append(node) elif len(node.qargs) == 1: gates_1q.append(node) else: logging.info(f'layer {ii}:",
"dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs) else: dag.apply_operation_back(Delay(duration), qargs, cargs) @staticmethod def _determine_delay_target_qubits(dag,",
"def run(self, dag: DAGCircuit) -> DAGCircuit: \"\"\"Run the Decompose pass",
"== np.pi/2: # a u2 gate qc.rz(lam - np.pi /",
"= qiskit.circuit.library.CXGate self.decomposition = QuantumCircuit(2) if mode == 'ry': self.decomposition.ry(-np.pi",
"theta, phi, lam = ugate.params if theta == np.pi/2: #",
"find duration for node {node.name}') new_dag.apply_operation_back(node.op, node.qargs, node.cargs) partition =",
"# a u2 gate qc.rz(lam - np.pi / 2, 0)",
"a linear topology.\"\"\" def run(self, dag): new_dag = DAGCircuit() for",
"new_dag.add_creg(creg) for layer_idx, layer in enumerate(dag.layers()): max_duration = 0 durations",
"RYGate, RZGate, U1Gate, U2Gate, U3Gate, UGate) from qiskit.circuit.library.standard_gates import (CU1Gate,",
"logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}') for el in lst:",
"logging from typing import Dict, List, Optional import numpy as",
"in dag.op_nodes(self.gate): dag.substitute_node_with_dag(node, self._dag) return dag class SequentialPass(TransformationPass): \"\"\"Adds barriers",
"else: dag.apply_operation_back(Delay(duration), qargs, cargs) @staticmethod def _determine_delay_target_qubits(dag, layer): \"\"\" Determine",
"max_duration - durations[q] > 0: self.add_delay_to_dag(max_duration - durations[q], new_dag, [q],",
"self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag2) return dag class RemoveDiagonalGatesAfterInput(TransformationPass): \"\"\"Remove diagonal gates",
"dag.cregs.values(): new_dag.add_creg(creg) for layer_idx, layer in enumerate(dag.layers()): max_duration = 0",
"for node in dag.op_nodes(self.gate): dag.substitute_node_with_dag(node, self._dag) return dag class SequentialPass(TransformationPass):",
"single qubit rotations \"\"\" def __init__(self, mode: str = 'ry'):",
"a circuit Args: epsilon: Threshold for rotation angle to be",
"{len(even)} odd {len(odd)}, other {len(other_gates)}') if len(even) > 0: for",
"class RemoveSmallRotations(TransformationPass): \"\"\"Return a circuit with small rotation gates removed.\"\"\"",
"QuantumCircuit(2) if mode == 'ry': self.decomposition.ry(-np.pi / 2, 1) self.decomposition.cz(0,",
"SGate, TdgGate, TGate, ZGate) from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.converters.circuit_to_dag",
"from a circuit Args: epsilon: Threshold for rotation angle to",
"executed in parallel. It assumes a linear topology.\"\"\" def run(self,",
"qc.rz(lam, 0) qc.rx(np.pi / 2, 0) qc.rz(theta + np.pi, 0)",
"if mode == 'ry': self.decomposition.ry(-np.pi / 2, 1) self.decomposition.cz(0, 1)",
"self._empty_dag1) elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)): if node.op.is_parameterized(): # for",
"== \"op\" and isinstance(s.op, Reset): return True return False if",
"ZGate) from qiskit.circuit.quantumcircuit import QuantumCircuit from qiskit.converters.circuit_to_dag import circuit_to_dag from",
"qiskit from qiskit.circuit import Barrier, Delay, Reset from qiskit.circuit.library import",
"if node.name in ['barrier', 'measure']: continue new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return",
"Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) if len(odd) > 0: for node",
"delays on the other qubits. \"\"\" def __init__(self, gate_durations: Dict[str,",
"{layer_idx}: lst {lst}, durations {durations}') for el in lst: logger.info(f'apply_operation_back:",
"DAGCircuit from qiskit.transpiler.basepasses import TransformationPass logger = logging.getLogger(__name__) class RemoveSmallRotations(TransformationPass):",
"with small rotations removed \"\"\" def modulo_2pi(x): x = float(x)",
"# from https://arxiv.org/pdf/1707.03429.pdf qc.rz(lam, 0) qc.rx(np.pi / 2, 0) qc.rz(theta",
"dag.op_nodes(): if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)): if node.op.is_parameterized(): #",
"Decompose CX into CZ and single qubit rotations \"\"\" def",
"through the DAG and expand each non-basis node for node",
"= [] for node in layer['graph'].op_nodes(): if len(node.qargs) == 2:",
"0) qc.rz(phi + np.pi, 0) elif isinstance(ugate, U2Gate): phi, lam",
"0) else: # from https://arxiv.org/pdf/1707.03429.pdf qc.rz(lam, 0) qc.rx(np.pi / 2,",
"and applies appropriate delays on the other qubits. \"\"\" def",
"dag.op_nodes(): new_dag.apply_operation_back(node.op, node.qargs, node.cargs) logger.info('SequentialPass: adding node {node.name}') if node.name",
"new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) return new_dag class DelayPass(TransformationPass): \"\"\"Adds delay gates",
"ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate) diagonal_2q_gates = (CZGate, CRZGate,",
"removed modulo2pi: If True, then rotations multiples of 2pi are",
"LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce a linear topology The barrier",
"expanded. \"\"\" # Walk through the DAG and expand each",
"max(max_duration, self.gate_durations[node.name]) for q in node.qargs: durations[q] = self.gate_durations[node.name] else:",
"qubits are idle. For every layer of the circuit it",
"each non-basis node for node in dag.op_nodes(self.gate): dag.substitute_node_with_dag(node, self._dag) return",
"Args: gate_durations: Gate durations in the units of dt \"\"\"",
"return True return False if successor.type == \"op\" and isinstance(successor.op,",
"if np.abs(phi) <= self.epsilon: dag.substitute_node_with_dag(node, self._empty_dag1) elif isinstance(node.op, (CRXGate, CRYGate,",
"node in even: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op, Barrier):",
"new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) for node in gates_2q: new_dag.apply_operation_back(node.op, node.qargs, node.cargs)",
"the start of a circuit. Transpiler pass to remove diagonal",
"into elementary rotations Rx, Ry, Rz The U gates are",
"DAG and expand each non-basis node for node in dag.op_nodes(self.gate):",
"self.gate_durations: max_duration = max(max_duration, self.gate_durations[node.name]) for q in node.qargs: durations[q]",
"a circuit. Including diagonal 2Q gates. Nodes after a reset",
"for node in even: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if not isinstance(node.op,",
"U1Gate, U2Gate, U3Gate, UGate) from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,",
"the DAG and expand each non-basis node for node in",
"new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce a linear topology",
"len(node.qargs) == 1: gates_1q.append(node) else: logging.info(f'layer {ii}: other type of",
"1: gates_1q.append(node) else: logging.info(f'layer {ii}: other type of node {node}')",
"Decompose pass on `dag`. Args: dag: input dag. Returns: output",
"input_node in (dag.input_map.values()): try: successor = next(dag.quantum_successors(input_node)) except StopIteration: continue",
"= qiskit.converters.circuit_to_dag(QuantumCircuit(1)) self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi def run(self,",
"Args: dag: input dag. Returns: Output dag with small rotations",
"return True if s.type == \"op\" and isinstance(s.op, Reset): return",
"Threshold for rotation angle to be removed modulo2pi: If True,",
"no two qubit gates are executed at the same time",
"Returns: output dag where ``CX`` was expanded. \"\"\" # Walk",
"\"\"\" if s.type == 'in': return True if s.type ==",
"max_duration = 0 durations = {} for node in layer['graph'].op_nodes():",
"duration, dag, qargs, cargs): if self.delay_quantum: number_of_delays = int(duration/self.delay_quantum) for",
"list(dag.qubits) for el in partition: for q in el: if",
"elif len(node.qargs) == 1: gates_1q.append(node) else: logging.info(f'layer {ii}: other type",
"for creg in dag.cregs.values(): new_dag.add_creg(creg) for node in dag.op_nodes(): new_dag.apply_operation_back(node.op,",
"diagonal_2q_gates): predecessors = dag.quantum_predecessors(successor) if all(valid_predecessor(s) for s in predecessors):",
"durations[q] = self.gate_durations[node.name] else: logger.info('layer {layer_idx}, could not find duration",
"0) else: raise Exception(f'unknown gate type {ugate}') return qc def",
"qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate, SGate, TdgGate, TGate, ZGate) from",
"qiskit.circuit import Barrier, Delay, Reset from qiskit.circuit.library import (CRXGate, CRYGate,",
"circuit Args: epsilon: Threshold for rotation angle to be removed",
"(like RZ, T, Z, etc) at the start of a",
"\"\"\" Return True of node is valid predecessor for removal",
"such that no two qubit gates are executed at the",
"UGate) from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate, SGate, TdgGate, TGate,",
"qc.rz(phi + np.pi / 2, 0) elif isinstance(ugate, (U1Gate, PhaseGate)):",
"DecomposeCX(TransformationPass): \"\"\" Decompose CX into CZ and single qubit rotations",
"{len(other_gates)}') if len(even) > 0: for node in even: new_dag.apply_operation_back(node.op,",
"0) qc.rz(theta + np.pi, 0) qc.rx(np.pi / 2, 0) qc.rz(phi",
"= layer['partition'] lst = list(dag.qubits) for el in partition: for",
"RZGate)): if node.op.is_parameterized(): # for parameterized gates we do not",
"class SequentialPass(TransformationPass): \"\"\"Adds barriers between gates to make the circuit",
"= int(duration/self.delay_quantum) for ii in range(number_of_delays): dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs) else:",
"the circuit sequential.\"\"\" def run(self, dag): new_dag = DAGCircuit() for",
"import logging from typing import Dict, List, Optional import numpy",
"new_dag.add_qreg(qreg) for creg in dag.cregs.values(): new_dag.add_creg(creg) for ii, layer in",
"0) elif isinstance(ugate, (U1Gate, PhaseGate)): lam, = ugate.params qc.rz(lam, 0)",
"layer): \"\"\" Determine qubits in specified layer which require a",
"q in node.qargs: durations[q] = self.gate_durations[node.name] else: logger.info('layer {layer_idx}, could",
"ii, layer in enumerate(dag.layers()): gates_1q = [] gates_2q = []",
"= qiskit.converters.circuit_to_dag(QuantumCircuit(2)) self.mod2pi = modulo2pi def run(self, dag: DAGCircuit) ->",
"diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate) nodes_to_remove = set() for",
"linear topology.\"\"\" def run(self, dag): new_dag = DAGCircuit() for qreg",
"included. \"\"\" def run(self, dag): \"\"\"Run the RemoveDiagonalGatesBeforeMeasure pass on",
"return dag class DecomposeCX(TransformationPass): \"\"\" Decompose CX into CZ and",
"s.type == 'in': return True if s.type == \"op\" and",
"and expand each node if required for node in dag.op_nodes():",
"StopIteration: continue if successor.type == \"op\" and isinstance(successor.op, diagonal_1q_gates): nodes_to_remove.add(successor)",
"lst: logger.info(f'apply_operation_back: {[el]}') self.add_delay_to_dag(max_duration, new_dag, [el], []) for q in",
"logger = logging.getLogger(__name__) class RemoveSmallRotations(TransformationPass): \"\"\"Return a circuit with small",
"modulo_2pi(x): x = float(x) return np.mod(x + np.pi, 2 *",
"enumerate(dag.layers()): gates_1q = [] gates_2q = [] other_gates = []",
"% 2 == 0: even.append(node) else: odd.append(node) logging.info( f'layer {ii}:",
"durations in the units of dt \"\"\" super().__init__() self.gate_durations =",
"(dag.input_map.values()): try: successor = next(dag.quantum_successors(input_node)) except StopIteration: continue if successor.type",
"new_dag.apply_operation_back(node.op, node.qargs, node.cargs) partition = layer['partition'] if len(partition) == 0:",
"list(new_dag.qubits), []) for node in gates_2q: new_dag.apply_operation_back(node.op, node.qargs, node.cargs) if",
"list(new_dag.qubits), []) return new_dag class LinearTopologyParallelPass(TransformationPass): \"\"\"Adds barriers to enforce",
"ugate): qc = QuantumCircuit(1) if isinstance(ugate, (U3Gate, UGate)): theta, phi,",
"linear topology The barrier are placed between gates such that",
"if not isinstance(node.op, Barrier): new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), []) if len(odd) >",
"CRZGate)): if node.op.is_parameterized(): # for parameterized gates we do not",
"super().__init__() self._subdags: List = [] self.initial_layout = None self.gate =",
"new_dag = DAGCircuit() for qreg in dag.qregs.values(): new_dag.add_qreg(qreg) for creg",
"gate_durations: Dict[str, float], delay_quantum: Optional[float] = None): \"\"\" Args: gate_durations:",
"continue if successor.type == \"op\" and isinstance(successor.op, diagonal_1q_gates): nodes_to_remove.add(successor) def",
"node in dag.op_nodes(): if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)): if",
"ugate_replacement_circuit(self, ugate): qc = QuantumCircuit(1) if isinstance(ugate, (U3Gate, UGate)): theta,",
"\"\"\" def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):",
"Delay, Reset from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate, PhaseGate,",
"(CZGate, CRZGate, CU1Gate, RZZGate) nodes_to_remove = set() for input_node in",
"if required for node in dag.op_nodes(): if isinstance(node.op, (PhaseGate, U1Gate,",
"in range(number_of_delays): dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs) else: dag.apply_operation_back(Delay(duration), qargs, cargs) @staticmethod"
] |
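A sketch of how DelayPass pads idle qubits (my addition; the durations dict is hypothetical and expressed in units of dt):

from qiskit import QuantumCircuit
from qiskit.transpiler import PassManager

durations = {'x': 20, 'cz': 100}  # hypothetical gate durations, in dt

qc = QuantumCircuit(3)
qc.x(0)      # 20 dt
qc.cz(1, 2)  # 100 dt, same layer: qubit 0 is idle for 80 dt

out = PassManager([DelayPass(gate_durations=durations)]).run(qc)
print(out.count_ops())  # expected: {'x': 1, 'cz': 1, 'delay': 1}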
# IAFNNESTA.py

def help():
    return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm

Solves the filtering norm minimization + quadratic term problem with the
Nesterov algorithm, with continuation:

    argmin_x || iaFN(x) ||_1/2  subject to  ||b - Ax||_2^2 < delta

If no filter is provided, solves the L1 problem. Continuation is performed by
sequentially applying Nesterov's algorithm with a decreasing sequence of
values of mu0 >= mu >= muf.

The observation matrix A must be a projector (non-projector not implemented yet).

Inputs:
IAFNNESTA(b,                 # observed data, an m x 1 array
    sig_size=0,              # shape of the signal (defaults to the shape of At(b))
    A=identity, At=identity, # measurement matrix and adjoint (either a matrix or function handles)
    muf=0.0001,              # final mu value; smaller leads to higher accuracy
    delta,                   # l2 error bound; enforces how close the variable must fit
                             # the observations b, i.e. || y - Ax ||_2 <= delta.
                             # If delta = 0, enforces y = Ax.
                             # delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma = std(noise).
    L1w=1, L2w=0,            # weights of the L1 (anisotropic) and L2 (isotropic) norms
    verbose=0,               # whether to print internal steps
    MaxIntIter=5,            # number of continuation steps
    maxit=1000,              # maximum iterations of the inner loop
    x0=[],                   # initial solution; if not provided, will be At(b)
    U=identity, Ut=identity, # analysis/synthesis operators
    stopTest=1,              # stopTest == 1 : stop when the relative change in the
                             #   objective function is less than TolVar
                             # stopTest == 2 : stop when the l_infinity norm of the
                             #   difference in the xk variable is less than TolVar
    TolVar=1e-5,             # tolerance for the stopping criteria
    AAtinv=[],               # not implemented
    normU=1,                 # if U is provided, this should be norm(U)
    H=[], Ht=[])             # filter operations in sparse matrix form; also accepts the
                             # string 'tv' as input, in which case it calculates the tv norm

Outputs:
    xk,        # estimated x, the reconstructed signal
    niter,     # number of iterations
    residuals  # first column is the residual at every step,
               # second column is the value of f_mu at every step
'''

import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat


def identity(x):
    return x


def IAFNNESTA(b, sig_size=0, A=identity, At=identity, muf=0.0001, delta=0,
              L1w=1, L2w=0, verbose=0, MaxIntIter=5, maxit=1000, x0=[],
              U=identity, Ut=identity, stopTest=1, TolVar=1e-5, AAtinv=[],
              normU=1, H=[]):
    if delta < 0:
        raise Exception('Delta must not be negative')

    if not callable(A):  # if A is a matrix, wrap it and its adjoint as functions;
        A_mat = A        # capture the matrix first so the lambdas do not refer to themselves
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)

    b = b.reshape((-1, 1))
    Atb = At(b)
    if sig_size == 0:
        sig_size = Atb.shape

    if callable(AAtinv):
        AtAAtb = At(AAtinv(b))
    else:
        if len(AAtinv) > 0:
            AAtinv_mat = AAtinv  # same late-binding capture as for A above
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At(AAtinv(b))
        else:  # default
            AtAAtb = Atb
            AAtinv = identity

    if len(x0) == 0:
        x0 = AtAAtb

    if len(H) == 0:
        Hf = identity
        Hft = identity
    else:
        if not sparse.issparse(H):
            if isinstance(H, str):
                if H == 'tv':
                    hs = []
                    hs.append(np.array([[1, -1]]))
                    hs.append(np.array([[1], [-1]]))
                    H, _, _, _ = fil2mat.fil2mat(hs, sig_size)
                else:
                    print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
            else:  # list of filters:
                H, _, _, _ = fil2mat.fil2mat(H, sig_size)
        Ht = H.transpose()
        Hf = lambda x: H @ x
        Hft = lambda x: Ht @ x

    HU = lambda x: Hf(U(x))
    UtHt = lambda x: Ut(Hft(x))

    typemin = ''
    if L1w > 0:
        typemin += "iso"
    if L2w > 0:
        typemin += "aniso"
    typemin += 'tropic '
    if callable(H):
        typemin += 'filtering norm '

    mu0 = 0
    if L1w > 0:
        mu0 += L1w * 0.9 * np.max(np.linalg.norm(HU(x0), 1))
    if L2w > 0:
        mu0 += L2w * 0.9 * np.max(np.linalg.norm(HU(x0), 2))

    niter = 0
    Gamma = np.power(muf / mu0, 1 / MaxIntIter)
    mu = mu0
    Gammat = np.power(TolVar / 0.1, 1 / MaxIntIter)
    TolVar = 0.1

    for i in range(MaxIntIter):
        mu = mu * Gamma
        TolVar = TolVar * Gammat
        if verbose > 0:
            print("\tBeginning %s Minimization; mu = %g\n" % (typemin, mu))
        xk, niter_int, res = IAFNNesterov.IAFNNesterov(
            b, A=A, At=At, mu=mu, delta=delta, L1w=L1w, L2w=L2w,
            verbose=verbose, maxit=maxit, x0=x0, U=U, Ut=Ut,
            stopTest=stopTest, TolVar=TolVar, AAtinv=AAtinv,
            normU=normU, H=Hf, Ht=Hft)
        xplug = xk
        niter = niter_int + niter
        if i == 0:
            residuals = res
        else:
            residuals = np.vstack((residuals, res))

    return xk.reshape(sig_size)


if __name__ == "__main__":
    print(help())
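A minimal usage sketch for IAFNNESTA, assuming the module above is saved as IAFNNESTA.py and its companion modules IAFNNesterov and fil2mat are importable. The problem sizes, the piecewise-constant x_true, and the Gaussian measurement matrix are illustrative assumptions (note the docstring asks for a projector-like A, which a random Gaussian is not).

import numpy as np
from IAFNNESTA import IAFNNESTA  # assumes the module above is on the path

rng = np.random.default_rng(0)
n, m = 64, 32                    # illustrative signal length and measurement count

x_true = np.zeros((n, 1))
x_true[10:20] = 1.0              # piecewise-constant signal, a good fit for the 'tv' filter

A = rng.standard_normal((m, n)) / np.sqrt(m)  # illustrative matrix, not a true projector
b = A @ x_true

xk = IAFNNESTA(b, sig_size=(n, 1), A=A, delta=0, H='tv')
print(np.linalg.norm(xk.reshape((n, 1)) - x_true))  # reconstruction error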
# hypatia: ResultSet and index mixins

import itertools

import BTrees
from persistent import Persistent
from ZODB.broken import Broken
from zope.interface import implementer

_marker = object()

from .. import exc
from ..interfaces import (
    IResultSet,
    STABLE,
)


@implementer(IResultSet)
class ResultSet(object):
    """Implements :class:`hypatia.interfaces.IResultSet`"""

    family = BTrees.family64

    def __init__(self, ids, numids, resolver, sort_type=None):
        self.ids = ids  # only guaranteed to be iterable, not sliceable
        self.numids = numids
        self.resolver = resolver
        self.sort_type = sort_type

    def __len__(self):
        return self.numids

    def sort(self, index, reverse=False, limit=None, sort_type=None,
             raise_unsortable=True):
        if sort_type is None:
            sort_type = self.sort_type

        ids = self.ids
        if not hasattr(ids, "__len__"):
            # indexes have no obligation to be able to sort generators
            ids = list(ids)
            self.ids = ids

        ids = index.sort(
            self.ids,
            reverse=reverse,
            limit=limit,
            sort_type=sort_type,
            raise_unsortable=raise_unsortable,
        )

        numids = self.numids
        if limit:
            numids = min(numids, limit)

        return self.__class__(ids, numids, self.resolver, sort_type=STABLE)

    def first(self, resolve=True):
        # return the first object or None
        resolver = self.resolver
        if resolver is None or not resolve:
            for id_ in self.ids:
                # if self.ids is not a list or a tuple, allow this result set
                # to be iterated after first() is called and allow first() to
                # be idempotent
                if not hasattr(self.ids, "__len__"):
                    self.ids = itertools.chain([id_], self.ids)
                return id_
        else:
            for id_ in self.ids:
                # if self.ids is not a list or a tuple, allow this result set
                # to be iterated after first() is called and allow first() to
                # be idempotent
                if not hasattr(self.ids, "__len__"):
                    self.ids = itertools.chain([id_], self.ids)
                return resolver(id_)

    def one(self, resolve=True):
        if self.numids == 1:
            return self.first(resolve=resolve)
        if self.numids > 1:
            raise exc.MultipleResults(self)
        else:
            raise exc.NoResults(self)

    def _resolve_all(self, resolver):
        for id_ in self.ids:
            yield resolver(id_)

    def all(self, resolve=True):
        resolver = self.resolver
        if resolver is None or not resolve:
            return self.ids
        else:
            return self._resolve_all(resolver)

    def __iter__(self):
        return iter(self.all())

    def intersect(self, docids):
        """Intersect this resultset with a sequence of docids or another
        resultset.  Returns a new ResultSet."""
        # NB: we can't use an intersection function here because
        # self.ids may be a generator
        if isinstance(docids, ResultSet):
            docids = docids.ids
        filtered_ids = [x for x in self.ids if x in docids]
        return self.__class__(filtered_ids, len(filtered_ids), self.resolver)


class BaseIndexMixin(object):
    """Mixin class for indexes that implements common behavior"""

    family = BTrees.family64

    def discriminate(self, obj, default):
        """See interface IIndexInjection"""
        if callable(self.discriminator):
            value = self.discriminator(obj, _marker)
        else:
            value = getattr(obj, self.discriminator, _marker)

        if value is _marker:
            return default

        if isinstance(value, Persistent):
            raise ValueError("Catalog cannot index persistent object %s" % value)

        if isinstance(value, Broken):
            raise ValueError("Catalog cannot index broken object %s" % value)

        return value

    def reindex_doc(self, docid, obj):
        """See interface IIndexInjection"""
        self.unindex_doc(docid)
        self.index_doc(docid, obj)

    def indexed_count(self):
        """See IIndexedDocuments"""
        return len(self.indexed())

    def not_indexed_count(self):
        """See IIndexedDocuments"""
        return len(self.not_indexed())

    def docids(self):
        """See IIndexedDocuments"""
        not_indexed = self.not_indexed()
        indexed = self.indexed()
        if len(not_indexed) == 0:
            return self.family.IF.Set(indexed)
        elif len(indexed) == 0:
            return not_indexed
        indexed = self.family.IF.Set(indexed)
        return self.family.IF.union(not_indexed, indexed)

    def docids_count(self):
        """See IIndexedDocuments"""
        return len(self.docids())

    def apply_intersect(self, query, docids):
        """Default apply_intersect implementation"""
        result = self.apply(query)
        if docids is None:
            return result
        return self.family.IF.weightedIntersection(result, docids)[1]

    def _negate(self, apply_func, *args, **kw):
        positive = apply_func(*args, **kw)
        all = self.docids()
        if len(positive) == 0:
            return all
        return self.family.IF.difference(all, positive)

    def qname(self):
        # used in query representations; __name__ should be set by
        # catalog __setitem__, but if it's not, we fall back to a generic
        # representation
        return getattr(
            self,
            "__name__",
            str(self),
        )

    def resultset_from_query(self, query, names=None, resolver=None):
        # default resultset factory; meant to be overridden by systems that
        # have a default resolver.  NB: although the default implementation
        # below does not access "self" (so it would appear that this could be
        # turned into a classmethod or staticmethod), subclasses that override
        # may expect self, so this is a plain method.
        docids = query._apply(names)
        numdocs = len(docids)
        return ResultSet(docids, numdocs, resolver)

    def flush(self, *arg, **kw):
        """Hookable by upstream systems"""
        pass


class RichComparisonMixin(object):
    # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison

    def __eq__(self, other):
        raise NotImplementedError("Equality not implemented")

    def __lt__(self, other):
        raise NotImplementedError("Less than not implemented")

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)

    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
"for id_ in self.ids: yield resolver(id_) def all(self, resolve=True): resolver",
"self.ids: yield resolver(id_) def all(self, resolve=True): resolver = self.resolver if",
"self.ids = ids # only guaranteed to be iterable, not",
"use an intersection function here because # self.ids may be",
"be a generator if isinstance(docids, ResultSet): docids = docids.ids filtered_ids",
"list(ids) self.ids = ids ids = index.sort( self.ids, reverse=reverse, limit=limit,",
"names=None, resolver=None): # default resultset factory; meant to be overridden",
"= self.not_indexed() indexed = self.indexed() if len(not_indexed) == 0: return",
"is None or not resolve: for id_ in self.ids: #",
"by upstream systems\"\"\" pass class RichComparisonMixin(object): # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison",
"intersection function here because # self.ids may be a generator",
"import exc from ..interfaces import ( IResultSet, STABLE, ) @implementer(IResultSet)",
"ids = index.sort( self.ids, reverse=reverse, limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable, ) numids",
"BaseIndexMixin(object): \"\"\"Mixin class for indexes that implements common behavior\"\"\" family",
"apply_intersect implementation\"\"\" result = self.apply(query) if docids is None: return",
"self.__eq__(other)) def __le__(self, other): return self.__eq__(other) or self.__lt__(other) def __ge__(self,",
"self.ids) return resolver(id_) def one(self, resolve=True): if self.numids == 1:",
"_marker) if value is _marker: return default if isinstance(value, Persistent):",
"from persistent import Persistent from ZODB.broken import Broken from zope.interface",
"if self.numids == 1: return self.first(resolve=resolve) if self.numids > 1:",
"id_ in self.ids: # if self.ids is not a list",
"this resultset with a sequence of docids or another resultset.",
"if isinstance(docids, ResultSet): docids = docids.ids filtered_ids = [x for",
"guaranteed to be iterable, not sliceable self.numids = numids self.resolver",
"import Broken from zope.interface import implementer _marker = object() from",
"resolver=None): # default resultset factory; meant to be overridden by",
"can't use an intersection function here because # self.ids may",
"( IResultSet, STABLE, ) @implementer(IResultSet) class ResultSet(object): \"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family",
"could be # turned into a classmeth or staticmethod, subclasses",
"\"__name__\", str(self), ) def resultset_from_query(self, query, names=None, resolver=None): # default",
"Broken from zope.interface import implementer _marker = object() from ..",
"def docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.docids()) def apply_intersect(self, query, docids):",
"# be idempotent if not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_],",
"# NB: we can't use an intersection function here because",
"= self.apply(query) if docids is None: return result return self.family.IF.weightedIntersection(result,",
"have no obligation to be able to sort generators ids",
"first() is called and allow first() to # be idempotent",
") @implementer(IResultSet) class ResultSet(object): \"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family = BTrees.family64 def",
"a default resolver. NB: although the default implementation # below",
"Persistent from ZODB.broken import Broken from zope.interface import implementer _marker",
"isinstance(value, Broken): raise ValueError(\"Catalog cannot index broken object %s\" %",
"ValueError(\"Catalog cannot index broken object %s\" % value) return value",
"exc.MultipleResults(self) else: raise exc.NoResults(self) def _resolve_all(self, resolver): for id_ in",
"raise ValueError(\"Catalog cannot index broken object %s\" % value) return",
"raise NotImplementedError(\"Less than not implemented\") def __ne__(self, other): return not",
"len(indexed) == 0: return not_indexed indexed = self.family.IF.Set(indexed) return self.family.IF.union(not_indexed,",
"to be iterated after first() is called and allow first()",
"resultset with a sequence of docids or another resultset. Returns",
"yield resolver(id_) def all(self, resolve=True): resolver = self.resolver if resolver",
"return all return self.family.IF.difference(all, positive) def qname(self): # used in",
"so it would appear that this could be # turned",
"iterated after first() is called and allow first() to #",
"default implementation # below does not access \"self\", so it",
"self.discriminator(obj, _marker) else: value = getattr(obj, self.discriminator, _marker) if value",
"getattr( self, \"__name__\", str(self), ) def resultset_from_query(self, query, names=None, resolver=None):",
"return not self.__eq__(other) def __gt__(self, other): return not (self.__lt__(other) or",
"to be able to sort generators ids = list(ids) self.ids",
"): if sort_type is None: sort_type = self.sort_type ids =",
"resolver(id_) def all(self, resolve=True): resolver = self.resolver if resolver is",
"obligation to be able to sort generators ids = list(ids)",
"sliceable self.numids = numids self.resolver = resolver self.sort_type = sort_type",
"persistent import Persistent from ZODB.broken import Broken from zope.interface import",
"> 1: raise exc.MultipleResults(self) else: raise exc.NoResults(self) def _resolve_all(self, resolver):",
"self.first(resolve=resolve) if self.numids > 1: raise exc.MultipleResults(self) else: raise exc.NoResults(self)",
"self.__class__(filtered_ids, len(filtered_ids), self.resolver) class BaseIndexMixin(object): \"\"\"Mixin class for indexes that",
"__len__(self): return self.numids def sort( self, index, reverse=False, limit=None, sort_type=None,",
"def first(self, resolve=True): # return the first object or None",
"or a tuple, allow this result set # to be",
"docids or another resultset. Returns a new ResultSet.\"\"\" # NB:",
"= self.discriminator(obj, _marker) else: value = getattr(obj, self.discriminator, _marker) if",
"another resultset. Returns a new ResultSet.\"\"\" # NB: we can't",
"other): raise NotImplementedError(\"Equality not implemented\") def __lt__(self, other): raise NotImplementedError(\"Less",
"ResultSet(object): \"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family = BTrees.family64 def __init__(self, ids, numids,",
"self.numids if limit: numids = min(numids, limit) return self.__class__(ids, numids,",
"IIndexedDocuments\"\"\" return len(self.docids()) def apply_intersect(self, query, docids): \"\"\"Default apply_intersect implementation\"\"\"",
"object() from .. import exc from ..interfaces import ( IResultSet,",
"object %s\" % value) return value def reindex_doc(self, docid, obj):",
"self.not_indexed() indexed = self.indexed() if len(not_indexed) == 0: return self.family.IF.Set(indexed)",
"def __ne__(self, other): return not self.__eq__(other) def __gt__(self, other): return",
"to be overridden by systems that # have a default",
"__gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other):",
"self.numids = numids self.resolver = resolver self.sort_type = sort_type def",
"# turned into a classmeth or staticmethod, subclasses that override",
"return self._resolve_all(resolver) def __iter__(self): return iter(self.all()) def intersect(self, docids): \"\"\"Intersect",
"len(not_indexed) == 0: return self.family.IF.Set(indexed) elif len(indexed) == 0: return",
"def __le__(self, other): return self.__eq__(other) or self.__lt__(other) def __ge__(self, other):",
"= self.numids if limit: numids = min(numids, limit) return self.__class__(ids,",
"% value) return value def reindex_doc(self, docid, obj): \"\"\"See interface",
"be able to sort generators ids = list(ids) self.ids =",
"def apply_intersect(self, query, docids): \"\"\"Default apply_intersect implementation\"\"\" result = self.apply(query)",
"\"\"\"Default apply_intersect implementation\"\"\" result = self.apply(query) if docids is None:",
"not_indexed indexed = self.family.IF.Set(indexed) return self.family.IF.union(not_indexed, indexed) def docids_count(self): \"\"\"See",
"isinstance(docids, ResultSet): docids = docids.ids filtered_ids = [x for x",
"tuple, allow this result set # to be iterated after",
"== 1: return self.first(resolve=resolve) if self.numids > 1: raise exc.MultipleResults(self)",
"iter(self.all()) def intersect(self, docids): \"\"\"Intersect this resultset with a sequence",
"or staticmethod, subclasses that override may # expect self, so",
"upstream systems\"\"\" pass class RichComparisonMixin(object): # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison def",
"no obligation to be able to sort generators ids =",
"(self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__eq__(other) or self.__lt__(other)",
"in self.ids: yield resolver(id_) def all(self, resolve=True): resolver = self.resolver",
"= ids ids = index.sort( self.ids, reverse=reverse, limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable,",
"= self.docids() if len(positive) == 0: return all return self.family.IF.difference(all,",
"numids, resolver, sort_type=None): self.ids = ids # only guaranteed to",
"iterable, not sliceable self.numids = numids self.resolver = resolver self.sort_type",
"so this is a plain method. docids = query._apply(names) numdocs",
"query representations; __name__ should be set by # catalog __setitem__",
"not sliceable self.numids = numids self.resolver = resolver self.sort_type =",
"raise exc.MultipleResults(self) else: raise exc.NoResults(self) def _resolve_all(self, resolver): for id_",
"resultset_from_query(self, query, names=None, resolver=None): # default resultset factory; meant to",
"limit) return self.__class__(ids, numids, self.resolver, sort_type=STABLE) def first(self, resolve=True): #",
"= numids self.resolver = resolver self.sort_type = sort_type def __len__(self):",
"resolve=True): resolver = self.resolver if resolver is None or not",
"# default resultset factory; meant to be overridden by systems",
"not resolve: for id_ in self.ids: # if self.ids is",
"or not resolve: return self.ids else: return self._resolve_all(resolver) def __iter__(self):",
"\"__len__\"): self.ids = itertools.chain([id_], self.ids) return id_ else: for id_",
"called and allow first() to # be idempotent if not",
"\"\"\"Mixin class for indexes that implements common behavior\"\"\" family =",
"if docids is None: return result return self.family.IF.weightedIntersection(result, docids)[1] def",
"return the first object or None resolver = self.resolver if",
"return self.numids def sort( self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True",
"len(filtered_ids), self.resolver) class BaseIndexMixin(object): \"\"\"Mixin class for indexes that implements",
"catalog __setitem__ but if it's not, we fall back to",
"= object() from .. import exc from ..interfaces import (",
"class for indexes that implements common behavior\"\"\" family = BTrees.family64",
"None: return result return self.family.IF.weightedIntersection(result, docids)[1] def _negate(self, apply_func, *args,",
"list or a tuple, allow this result set # to",
"if resolver is None or not resolve: return self.ids else:",
"return self.__eq__(other) or self.__lt__(other) def __ge__(self, other): return self.__eq__(other) or",
"self.resolver if resolver is None or not resolve: for id_",
"\"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family = BTrees.family64 def __init__(self, ids, numids, resolver,",
"systems that # have a default resolver. NB: although the",
"= len(docids) return ResultSet(docids, numdocs, resolver) def flush(self, *arg, **kw):",
"in docids] return self.__class__(filtered_ids, len(filtered_ids), self.resolver) class BaseIndexMixin(object): \"\"\"Mixin class",
"cannot index persistent object %s\" % value) if isinstance(value, Broken):",
"all = self.docids() if len(positive) == 0: return all return",
"sort_type def __len__(self): return self.numids def sort( self, index, reverse=False,",
"IIndexedDocuments\"\"\" not_indexed = self.not_indexed() indexed = self.indexed() if len(not_indexed) ==",
"== 0: return self.family.IF.Set(indexed) elif len(indexed) == 0: return not_indexed",
"plain method. docids = query._apply(names) numdocs = len(docids) return ResultSet(docids,",
"first(self, resolve=True): # return the first object or None resolver",
"fall back to a generic # representation return getattr( self,",
"and allow first() to # be idempotent if not hasattr(self.ids,",
"= resolver self.sort_type = sort_type def __len__(self): return self.numids def",
"return default if isinstance(value, Persistent): raise ValueError(\"Catalog cannot index persistent",
"not, we fall back to a generic # representation return",
"def sort( self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True ): if",
"self.ids = ids ids = index.sort( self.ids, reverse=reverse, limit=limit, sort_type=sort_type,",
"self.ids else: return self._resolve_all(resolver) def __iter__(self): return iter(self.all()) def intersect(self,",
"resultset factory; meant to be overridden by systems that #",
"return self.family.IF.union(not_indexed, indexed) def docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.docids()) def",
"ResultSet.\"\"\" # NB: we can't use an intersection function here",
"% value) if isinstance(value, Broken): raise ValueError(\"Catalog cannot index broken",
"return len(self.not_indexed()) def docids(self): \"\"\"See IIndexedDocuments\"\"\" not_indexed = self.not_indexed() indexed",
"self.ids is not a list or a tuple, allow this",
"default): \"\"\"See interface IIndexInjection\"\"\" if callable(self.discriminator): value = self.discriminator(obj, _marker)",
"\"\"\"Hookable by upstream systems\"\"\" pass class RichComparisonMixin(object): # Stolen from",
"exc.NoResults(self) def _resolve_all(self, resolver): for id_ in self.ids: yield resolver(id_)",
"from zope.interface import implementer _marker = object() from .. import",
"getattr(obj, self.discriminator, _marker) if value is _marker: return default if",
"allow this result set # to be iterated after first()",
"sequence of docids or another resultset. Returns a new ResultSet.\"\"\"",
"idempotent if not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_], self.ids) return",
"callable(self.discriminator): value = self.discriminator(obj, _marker) else: value = getattr(obj, self.discriminator,",
"ValueError(\"Catalog cannot index persistent object %s\" % value) if isinstance(value,",
"apply_intersect(self, query, docids): \"\"\"Default apply_intersect implementation\"\"\" result = self.apply(query) if",
"\"\"\"See IIndexedDocuments\"\"\" return len(self.not_indexed()) def docids(self): \"\"\"See IIndexedDocuments\"\"\" not_indexed =",
"# return the first object or None resolver = self.resolver",
"def discriminate(self, obj, default): \"\"\"See interface IIndexInjection\"\"\" if callable(self.discriminator): value",
"obj): \"\"\"See interface IIndexInjection\"\"\" self.unindex_doc(docid) self.index_doc(docid, obj) def indexed_count(self): \"\"\"See",
"obj, default): \"\"\"See interface IIndexInjection\"\"\" if callable(self.discriminator): value = self.discriminator(obj,",
"_marker) else: value = getattr(obj, self.discriminator, _marker) if value is",
"= BTrees.family64 def discriminate(self, obj, default): \"\"\"See interface IIndexInjection\"\"\" if",
"return not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__eq__(other)",
"if sort_type is None: sort_type = self.sort_type ids = self.ids",
"with a sequence of docids or another resultset. Returns a",
"Returns a new ResultSet.\"\"\" # NB: we can't use an",
"docids): \"\"\"Default apply_intersect implementation\"\"\" result = self.apply(query) if docids is",
"resolve=True): if self.numids == 1: return self.first(resolve=resolve) if self.numids >",
"= self.indexed() if len(not_indexed) == 0: return self.family.IF.Set(indexed) elif len(indexed)",
"it would appear that this could be # turned into",
"indexed) def docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.docids()) def apply_intersect(self, query,",
"def _negate(self, apply_func, *args, **kw): positive = apply_func(*args, **kw) all",
"self.__eq__(other) or self.__lt__(other) def __ge__(self, other): return self.__eq__(other) or self.__gt__(other)",
"= query._apply(names) numdocs = len(docids) return ResultSet(docids, numdocs, resolver) def",
"self.ids, reverse=reverse, limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable, ) numids = self.numids if",
"resolver(id_) def one(self, resolve=True): if self.numids == 1: return self.first(resolve=resolve)",
"that this could be # turned into a classmeth or",
"len(self.not_indexed()) def docids(self): \"\"\"See IIndexedDocuments\"\"\" not_indexed = self.not_indexed() indexed =",
"**kw): positive = apply_func(*args, **kw) all = self.docids() if len(positive)",
"self.ids: # if self.ids is not a list or a",
"self.index_doc(docid, obj) def indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.indexed()) def not_indexed_count(self):",
"is called and allow first() to # be idempotent if",
"raise exc.NoResults(self) def _resolve_all(self, resolver): for id_ in self.ids: yield",
"return self.__class__(ids, numids, self.resolver, sort_type=STABLE) def first(self, resolve=True): # return",
"or self.__eq__(other)) def __le__(self, other): return self.__eq__(other) or self.__lt__(other) def",
"return self.ids else: return self._resolve_all(resolver) def __iter__(self): return iter(self.all()) def",
"qname(self): # used in query representations; __name__ should be set",
"a plain method. docids = query._apply(names) numdocs = len(docids) return",
"def not_indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.not_indexed()) def docids(self): \"\"\"See IIndexedDocuments\"\"\"",
"limit: numids = min(numids, limit) return self.__class__(ids, numids, self.resolver, sort_type=STABLE)",
"set # to be iterated after first() is called and",
"resolver is None or not resolve: return self.ids else: return",
"= self.family.IF.Set(indexed) return self.family.IF.union(not_indexed, indexed) def docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return",
"back to a generic # representation return getattr( self, \"__name__\",",
"if resolver is None or not resolve: for id_ in",
"indexed = self.family.IF.Set(indexed) return self.family.IF.union(not_indexed, indexed) def docids_count(self): \"\"\"See IIndexedDocuments\"\"\"",
"intersect(self, docids): \"\"\"Intersect this resultset with a sequence of docids",
"id_ else: for id_ in self.ids: # if self.ids is",
"sort( self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True ): if sort_type",
"def __init__(self, ids, numids, resolver, sort_type=None): self.ids = ids #",
"ids # only guaranteed to be iterable, not sliceable self.numids",
"# if self.ids is not a list or a tuple,",
"import BTrees from persistent import Persistent from ZODB.broken import Broken",
"self.numids def sort( self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True ):",
"%s\" % value) return value def reindex_doc(self, docid, obj): \"\"\"See",
"be idempotent if not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_], self.ids)",
"RichComparisonMixin(object): # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison def __eq__(self, other): raise NotImplementedError(\"Equality",
"return len(self.docids()) def apply_intersect(self, query, docids): \"\"\"Default apply_intersect implementation\"\"\" result",
"= itertools.chain([id_], self.ids) return id_ else: for id_ in self.ids:",
"= sort_type def __len__(self): return self.numids def sort( self, index,",
"a generator if isinstance(docids, ResultSet): docids = docids.ids filtered_ids =",
"BTrees from persistent import Persistent from ZODB.broken import Broken from",
"return result return self.family.IF.weightedIntersection(result, docids)[1] def _negate(self, apply_func, *args, **kw):",
"class RichComparisonMixin(object): # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison def __eq__(self, other): raise",
"because # self.ids may be a generator if isinstance(docids, ResultSet):",
"__eq__(self, other): raise NotImplementedError(\"Equality not implemented\") def __lt__(self, other): raise",
"itertools.chain([id_], self.ids) return id_ else: for id_ in self.ids: #",
"x in docids] return self.__class__(filtered_ids, len(filtered_ids), self.resolver) class BaseIndexMixin(object): \"\"\"Mixin",
"not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_], self.ids) return resolver(id_) def",
".. import exc from ..interfaces import ( IResultSet, STABLE, )",
"if len(not_indexed) == 0: return self.family.IF.Set(indexed) elif len(indexed) == 0:",
"import Persistent from ZODB.broken import Broken from zope.interface import implementer",
"self, so this is a plain method. docids = query._apply(names)",
"if not hasattr(ids, \"__len__\"): # indexes have no obligation to",
"@implementer(IResultSet) class ResultSet(object): \"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family = BTrees.family64 def __init__(self,",
"by # catalog __setitem__ but if it's not, we fall",
"this is a plain method. docids = query._apply(names) numdocs =",
"one(self, resolve=True): if self.numids == 1: return self.first(resolve=resolve) if self.numids",
"resolver self.sort_type = sort_type def __len__(self): return self.numids def sort(",
"len(self.indexed()) def not_indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.not_indexed()) def docids(self): \"\"\"See",
"in self.ids: # if self.ids is not a list or",
"docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.docids()) def apply_intersect(self, query, docids): \"\"\"Default",
"reverse=reverse, limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable, ) numids = self.numids if limit:",
"self.indexed() if len(not_indexed) == 0: return self.family.IF.Set(indexed) elif len(indexed) ==",
"0: return not_indexed indexed = self.family.IF.Set(indexed) return self.family.IF.union(not_indexed, indexed) def",
"raise_unsortable=raise_unsortable, ) numids = self.numids if limit: numids = min(numids,",
"if limit: numids = min(numids, limit) return self.__class__(ids, numids, self.resolver,",
"numids = min(numids, limit) return self.__class__(ids, numids, self.resolver, sort_type=STABLE) def",
"ResultSet): docids = docids.ids filtered_ids = [x for x in",
"indexed = self.indexed() if len(not_indexed) == 0: return self.family.IF.Set(indexed) elif",
"docids = query._apply(names) numdocs = len(docids) return ResultSet(docids, numdocs, resolver)",
"a list or a tuple, allow this result set #",
"sort_type is None: sort_type = self.sort_type ids = self.ids if",
"NB: although the default implementation # below does not access",
"self.ids may be a generator if isinstance(docids, ResultSet): docids =",
"# have a default resolver. NB: although the default implementation",
"is None: sort_type = self.sort_type ids = self.ids if not",
"docids)[1] def _negate(self, apply_func, *args, **kw): positive = apply_func(*args, **kw)",
"object or None resolver = self.resolver if resolver is None",
"self.docids() if len(positive) == 0: return all return self.family.IF.difference(all, positive)",
"ids, numids, resolver, sort_type=None): self.ids = ids # only guaranteed",
"from http://www.voidspace.org.uk/python/recipebook.shtml#comparison def __eq__(self, other): raise NotImplementedError(\"Equality not implemented\") def",
"if not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_], self.ids) return resolver(id_)",
"to # be idempotent if not hasattr(self.ids, \"__len__\"): self.ids =",
"x in self.ids if x in docids] return self.__class__(filtered_ids, len(filtered_ids),",
"self.__class__(ids, numids, self.resolver, sort_type=STABLE) def first(self, resolve=True): # return the",
"that implements common behavior\"\"\" family = BTrees.family64 def discriminate(self, obj,",
"implements common behavior\"\"\" family = BTrees.family64 def discriminate(self, obj, default):",
"def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def __le__(self,",
"if not hasattr(self.ids, \"__len__\"): self.ids = itertools.chain([id_], self.ids) return id_",
"__iter__(self): return iter(self.all()) def intersect(self, docids): \"\"\"Intersect this resultset with",
"first() to # be idempotent if not hasattr(self.ids, \"__len__\"): self.ids",
"self.family.IF.Set(indexed) return self.family.IF.union(not_indexed, indexed) def docids_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.docids())",
"*arg, **kw): \"\"\"Hookable by upstream systems\"\"\" pass class RichComparisonMixin(object): #",
"numids self.resolver = resolver self.sort_type = sort_type def __len__(self): return",
"if isinstance(value, Persistent): raise ValueError(\"Catalog cannot index persistent object %s\"",
"def __len__(self): return self.numids def sort( self, index, reverse=False, limit=None,",
"== 0: return all return self.family.IF.difference(all, positive) def qname(self): #",
"if it's not, we fall back to a generic #",
"= self.sort_type ids = self.ids if not hasattr(ids, \"__len__\"): #",
"return len(self.indexed()) def not_indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.not_indexed()) def docids(self):",
"representations; __name__ should be set by # catalog __setitem__ but",
"return resolver(id_) def one(self, resolve=True): if self.numids == 1: return",
"value is _marker: return default if isinstance(value, Persistent): raise ValueError(\"Catalog",
"numdocs = len(docids) return ResultSet(docids, numdocs, resolver) def flush(self, *arg,",
"set by # catalog __setitem__ but if it's not, we",
"or another resultset. Returns a new ResultSet.\"\"\" # NB: we",
"self.sort_type ids = self.ids if not hasattr(ids, \"__len__\"): # indexes",
"raise NotImplementedError(\"Equality not implemented\") def __lt__(self, other): raise NotImplementedError(\"Less than",
"result = self.apply(query) if docids is None: return result return",
"all return self.family.IF.difference(all, positive) def qname(self): # used in query",
"from .. import exc from ..interfaces import ( IResultSet, STABLE,",
"self.resolver = resolver self.sort_type = sort_type def __len__(self): return self.numids",
"**kw) all = self.docids() if len(positive) == 0: return all",
"implementation\"\"\" result = self.apply(query) if docids is None: return result",
"self.__eq__(other) def __gt__(self, other): return not (self.__lt__(other) or self.__eq__(other)) def",
"limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable, ) numids = self.numids if limit: numids",
"reindex_doc(self, docid, obj): \"\"\"See interface IIndexInjection\"\"\" self.unindex_doc(docid) self.index_doc(docid, obj) def",
"new ResultSet.\"\"\" # NB: we can't use an intersection function",
"after first() is called and allow first() to # be",
"this could be # turned into a classmeth or staticmethod,",
"return getattr( self, \"__name__\", str(self), ) def resultset_from_query(self, query, names=None,",
"resolve: return self.ids else: return self._resolve_all(resolver) def __iter__(self): return iter(self.all())",
"factory; meant to be overridden by systems that # have",
"overridden by systems that # have a default resolver. NB:",
"is a plain method. docids = query._apply(names) numdocs = len(docids)",
"def flush(self, *arg, **kw): \"\"\"Hookable by upstream systems\"\"\" pass class",
"limit=None, sort_type=None, raise_unsortable=True ): if sort_type is None: sort_type =",
"# to be iterated after first() is called and allow",
"self.numids > 1: raise exc.MultipleResults(self) else: raise exc.NoResults(self) def _resolve_all(self,",
"for x in self.ids if x in docids] return self.__class__(filtered_ids,",
"numdocs, resolver) def flush(self, *arg, **kw): \"\"\"Hookable by upstream systems\"\"\"",
"self, \"__name__\", str(self), ) def resultset_from_query(self, query, names=None, resolver=None): #",
"although the default implementation # below does not access \"self\",",
"else: raise exc.NoResults(self) def _resolve_all(self, resolver): for id_ in self.ids:",
"interface IIndexInjection\"\"\" if callable(self.discriminator): value = self.discriminator(obj, _marker) else: value",
"not (self.__lt__(other) or self.__eq__(other)) def __le__(self, other): return self.__eq__(other) or",
"if isinstance(value, Broken): raise ValueError(\"Catalog cannot index broken object %s\"",
"if x in docids] return self.__class__(filtered_ids, len(filtered_ids), self.resolver) class BaseIndexMixin(object):",
"interface IIndexInjection\"\"\" self.unindex_doc(docid) self.index_doc(docid, obj) def indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return",
"value = getattr(obj, self.discriminator, _marker) if value is _marker: return",
"def qname(self): # used in query representations; __name__ should be",
"not a list or a tuple, allow this result set",
"self.ids) return id_ else: for id_ in self.ids: # if",
"return self.family.IF.difference(all, positive) def qname(self): # used in query representations;",
"sort_type=sort_type, raise_unsortable=raise_unsortable, ) numids = self.numids if limit: numids =",
"sort_type=STABLE) def first(self, resolve=True): # return the first object or",
"is _marker: return default if isinstance(value, Persistent): raise ValueError(\"Catalog cannot",
"STABLE, ) @implementer(IResultSet) class ResultSet(object): \"\"\"Implements :class:`hypatia.interfaces.IResultSet`\"\"\" family = BTrees.family64",
"<reponame>pfw/hypatia<gh_stars>0 import itertools import BTrees from persistent import Persistent from",
"into a classmeth or staticmethod, subclasses that override may #",
"self.resolver) class BaseIndexMixin(object): \"\"\"Mixin class for indexes that implements common",
"that override may # expect self, so this is a",
"first object or None resolver = self.resolver if resolver is",
"self.discriminator, _marker) if value is _marker: return default if isinstance(value,",
"indexes have no obligation to be able to sort generators",
"resolver) def flush(self, *arg, **kw): \"\"\"Hookable by upstream systems\"\"\" pass",
"this result set # to be iterated after first() is",
"here because # self.ids may be a generator if isinstance(docids,",
"systems\"\"\" pass class RichComparisonMixin(object): # Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison def __eq__(self,",
"self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True ): if sort_type is",
"self.family.IF.difference(all, positive) def qname(self): # used in query representations; __name__",
"implemented\") def __lt__(self, other): raise NotImplementedError(\"Less than not implemented\") def",
"index broken object %s\" % value) return value def reindex_doc(self,",
"= docids.ids filtered_ids = [x for x in self.ids if",
"self.ids if x in docids] return self.__class__(filtered_ids, len(filtered_ids), self.resolver) class",
"= self.ids if not hasattr(ids, \"__len__\"): # indexes have no",
"ids ids = index.sort( self.ids, reverse=reverse, limit=limit, sort_type=sort_type, raise_unsortable=raise_unsortable, )",
"= apply_func(*args, **kw) all = self.docids() if len(positive) == 0:",
"[x for x in self.ids if x in docids] return",
"return ResultSet(docids, numdocs, resolver) def flush(self, *arg, **kw): \"\"\"Hookable by",
"self.apply(query) if docids is None: return result return self.family.IF.weightedIntersection(result, docids)[1]",
"result return self.family.IF.weightedIntersection(result, docids)[1] def _negate(self, apply_func, *args, **kw): positive",
"apply_func(*args, **kw) all = self.docids() if len(positive) == 0: return",
"len(self.docids()) def apply_intersect(self, query, docids): \"\"\"Default apply_intersect implementation\"\"\" result =",
"but if it's not, we fall back to a generic",
"docids): \"\"\"Intersect this resultset with a sequence of docids or",
"self.resolver, sort_type=STABLE) def first(self, resolve=True): # return the first object",
"IIndexedDocuments\"\"\" return len(self.not_indexed()) def docids(self): \"\"\"See IIndexedDocuments\"\"\" not_indexed = self.not_indexed()",
"should be set by # catalog __setitem__ but if it's",
"IIndexInjection\"\"\" self.unindex_doc(docid) self.index_doc(docid, obj) def indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.indexed())",
"representation return getattr( self, \"__name__\", str(self), ) def resultset_from_query(self, query,",
"NotImplementedError(\"Equality not implemented\") def __lt__(self, other): raise NotImplementedError(\"Less than not",
"resolver): for id_ in self.ids: yield resolver(id_) def all(self, resolve=True):",
"ResultSet(docids, numdocs, resolver) def flush(self, *arg, **kw): \"\"\"Hookable by upstream",
"import implementer _marker = object() from .. import exc from",
"http://www.voidspace.org.uk/python/recipebook.shtml#comparison def __eq__(self, other): raise NotImplementedError(\"Equality not implemented\") def __lt__(self,",
"None resolver = self.resolver if resolver is None or not",
"obj) def indexed_count(self): \"\"\"See IIndexedDocuments\"\"\" return len(self.indexed()) def not_indexed_count(self): \"\"\"See"
] |
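A minimal usage sketch of the ResultSet class above. This is illustrative only: the docs mapping, the ids, and the resolver below are hypothetical stand-ins, not from the source.

# Hypothetical resolver: maps document ids back to application objects.
docs = {1: "alpha", 2: "beta", 3: "gamma"}
rs = ResultSet(ids=[3, 1, 2], numids=3, resolver=docs.get)

len(rs)                     # 3
rs.first()                  # "alpha"? no -- "gamma": the first id (3), resolved
list(rs.all())              # ["gamma", "alpha", "beta"]
list(rs.intersect([1, 3]))  # ["gamma", "alpha"] -- the order of self.ids is kept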
# backend/listings/migrations/0001_initial.py
# Generated by Django 3.2.3 on 2021-05-30 04:28

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('realtors', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.CharField(max_length=200, unique=True)),
                ('title', models.CharField(max_length=150)),
                ('address', models.CharField(default='', max_length=150)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=15)),
                ('description', models.TextField(blank=True)),
                ('sale_type', models.CharField(choices=[('For Sale', 'For Sale'), ('For Rent', 'For Rent')], default='For Sale', max_length=50)),
                ('price', models.IntegerField()),
                ('bedrooms', models.IntegerField()),
                ('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
                ('home_type', models.CharField(choices=[('House', 'House'), ('Condo', 'Condo'), ('Townhouse', 'Townhouse')], default='House', max_length=50)),
                ('sqft', models.IntegerField()),
                ('open_house', models.BooleanField(default=False)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
            ],
        ),
    ]
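For reference, a sketch of the listings/models.py that this migration would correspond to. The class body is inferred field-by-field from the migration above; the import location of Realtor is an assumption, not part of the source.

from django.db import models
from django.utils import timezone

from realtors.models import Realtor  # assumed location of the Realtor model


class Listing(models.Model):
    slug = models.CharField(max_length=200, unique=True)
    title = models.CharField(max_length=150)
    address = models.CharField(default='', max_length=150)
    city = models.CharField(max_length=100)
    state = models.CharField(max_length=100)
    zipcode = models.CharField(max_length=15)
    description = models.TextField(blank=True)
    sale_type = models.CharField(choices=[('For Sale', 'For Sale'), ('For Rent', 'For Rent')], default='For Sale', max_length=50)
    price = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.DecimalField(decimal_places=1, max_digits=2)
    home_type = models.CharField(choices=[('House', 'House'), ('Condo', 'Condo'), ('Townhouse', 'Townhouse')], default='House', max_length=50)
    sqft = models.IntegerField()
    open_house = models.BooleanField(default=False)
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d')
    photo_1 = models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')
    photo_2 = models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')
    is_published = models.BooleanField(default=True)
    list_date = models.DateTimeField(blank=True, default=timezone.now)
    realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)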
[
# discordEasy listener example: the same handler, first as a bare coroutine,
# then wrapped in a Listener. (Message strings translated to English.)
import asyncio
import discord

# Just a function to add to the bot.
async def on_message(message):
    if not message.author.bot:
        await message.channel.send(f"{message.author.mention} sent a message!")

# A Listener already created with the function.
from discordEasy.objects import Listener

async def on_message(message):
    if not message.author.bot:
        await message.channel.send(f"{message.author.mention} sent a message!")

listener_on_message = Listener(on_message)
] |
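For comparison, the same coroutine can also be registered directly on a plain discord.py client, without discordEasy. A minimal sketch, assuming the on_message coroutine above is in scope and using a placeholder bot token:

import discord

intents = discord.Intents.default()
client = discord.Client(intents=intents)

# Client.event registers the coroutine under its own name, on_message.
client.event(on_message)

client.run('YOUR_BOT_TOKEN')  # placeholder token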
[
# pianonet Flask service: generates piano performances from a seed MIDI file
# and serves the results as MIDI downloads.
import os
import random

from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename

from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll

app = Flask(__name__)

base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"

performances_path = os.path.join(base_path, 'data', 'performances')


def get_random_midi_file_name():
    """Get a random midi file name that will not ever collide."""

    return str(random.randint(0, 10000000000000000000)) + ".midi"


def get_performance_path(midi_file_name):
    """Returns full path to performance midi file given a file name."""

    return os.path.join(performances_path, midi_file_name)


@app.route('/')
def alive():
    return 'OK'


@app.route('/performances/', methods=['GET'])
def get_performance():
    """Returns the requested performance as midi file. Expected query string is 'midi_file_name', such as 1234.midi"""

    performance_midi_file_name = request.args.get('midi_file_name')

    # Check for a missing parameter before sanitizing: secure_filename(None) would raise.
    if performance_midi_file_name is None:
        return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}

    performance_midi_file_name = secure_filename(performance_midi_file_name)

    midi_file_path = get_performance_path(performance_midi_file_name)

    if not os.path.exists(midi_file_path):
        return {
            "http_code": 404,
            "code": "Not Found",
            "message": "midi_file " + performance_midi_file_name + " not found."
        }

    return send_from_directory(performances_path, performance_midi_file_name)


@app.route('/create-performance', methods=['POST'])
def performance():
    """
    Expects post form data as follows:

    seed_midi_file_data: Midi file that forms the seed for a performance, as a string encoding like "8,2,3,4,5..."
    seconds_to_generate: Number of seconds of new notes to generate
    model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
    """

    seed_midi_file_data = request.form.get('seed_midi_file_data')

    if seed_midi_file_data is None:
        return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}

    # Decode the comma-separated byte values and save them as a seed midi file.
    seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]

    frame = bytearray()
    for i in seed_midi_file_int_array:
        frame.append(i)

    saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())

    with open(saved_seed_midi_file_path, 'wb') as midi_file:
        midi_file.write(frame)

    seconds_to_generate = request.form.get('seconds_to_generate')

    if seconds_to_generate is None:
        return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}

    seconds_to_generate = float(seconds_to_generate)

    model_complexity = request.form.get('model_complexity', 'low')

    if model_complexity == 'low':
        model_name = "micro_1"
    else:
        model_name = "r9p0_3500kparams_approx_9_blocks_model"

    model_path = os.path.join(base_path, 'models', model_name)

    input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
    input_pianoroll.trim_silence_off_ends()

    # 48 time steps per second of generated audio.
    final_pianoroll = get_performance_from_pianoroll(
        pianoroll_seed=input_pianoroll,
        num_time_steps=int(48 * seconds_to_generate),
        model_path=model_path,
    )

    midi_file_name = get_random_midi_file_name()
    midi_file_path = get_performance_path(midi_file_name)
    final_pianoroll.save_to_midi_file(midi_file_path)

    return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}


if __name__ == '__main__':
    app.run(host='0.0.0.0')
] |
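A minimal client sketch for the two routes above, assuming the service is running locally on Flask's default port; the seed byte values are placeholders:

import requests

base = 'http://localhost:5000'

# Request a generated performance; the seed is the raw MIDI file's bytes
# encoded as a comma-separated string of integers (placeholder values here).
resp = requests.post(base + '/create-performance', data={
    'seed_midi_file_data': '77,84,104,100',
    'seconds_to_generate': '10',
    'model_complexity': 'low',
})
midi_file_name = resp.json()['midi_file_name']

# Fetch the finished performance by the returned file name.
midi = requests.get(base + '/performances/', params={'midi_file_name': midi_file_name})
with open(midi_file_name, 'wb') as f:
    f.write(midi.content)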
[
# Dash dashboard: COVID-19 case predictions scored on Watson Machine Learning (WML).
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
from urllib import parse

import requests

logger = logging.getLogger(__name__)

external_stylesheets = [dbc.themes.DARKLY]

is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.get('PORT', 8050))
host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')

wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']

# Derive the WML base URL and instance ID from the scoring URL.
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replace(path='').geturl()
wml_instance_id = url.path.split('/')[3]

logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)

wml_credentials = {
    "apikey": wml_api_key,
    "instance_id": wml_instance_id,
    "url": wml_base_url,
}

iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'


def _get_token():
    data = {
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
        'apikey': wml_credentials['apikey']
    }
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response = requests.post(iam_token_endpoint, data=data, headers=headers)
    return response.json()['access_token']


def score(token, algorithm, start_date, country, predict_range, s, i, r):
    headers = {'Authorization': 'Bearer ' + token}
    payload = {
        "fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
        "values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
    }
    logger.info('Scoring with payload: %s', json.dumps(payload))
    response = requests.post(wml_scoring_url, json=payload, headers=headers)
    if response.status_code == 200:
        result = response.json()
    else:
        raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))
    n_days = len(result['values'])
    index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
    return pd.DataFrame(result['values'], columns=result['fields'], index=index)


def serve_layout():
    token = _get_token()
    # predict_range = 14
    # sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
    calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)

    # days = list(sir_result.index)
    days = list(calibration_result.index)

    # Day-over-day deltas for the bar traces.
    calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)
    calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)

    fig = make_subplots(specs=[[{"secondary_y": True}]])
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
        secondary_y=True,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
        secondary_y=False,
    )
    fig.add_trace(
        go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
        secondary_y=False,
    )
    fig.update_layout(
        title="Prediction of confirmed cases for Poland",
        template="plotly_dark",
        height=900
    )
    fig.update_xaxes(title_text="Date")
    fig.update_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
    fig.update_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])

    # fig = go.Figure(
    #     data=[
    #         go.Scatter(x=days, y=sir_result['I'], name='SIR'),
    #         go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
    #     ],
    #     layout=go.Layout(
    #         title="COVID19 infected prediction in Poland",
    #         template="plotly_dark",
    #         height=600
    #     )
    # )

    return html.Div(children=[
        html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
        dcc.Graph(
            id='example-graph',
            figure=fig
        )
    ])


app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout

if __name__ == '__main__':
    app.run_server(debug=(not is_cf_instance), host=host, port=port)
"import dash_core_components as dcc import dash_html_components as html import plotly.graph_objects",
"secondary_y=False, range=[0, 6000]) fig.update_yaxes(title_text=\"New cases per day\", secondary_y=True, range=[0, 1000])",
"= int(os.environ.get('PORT', 8050)) host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1') wml_api_key = os.environ['WML_API_KEY']",
"days = list(sir_result.index) days = list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual'] -",
"- calibration_result['Predicted'].shift(1, fill_value=0) fig = make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'],",
"# title=\"COVID19 infected prediction in Poland\", # template=\"plotly_dark\", # height=600",
"result = response.json() else: raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))",
"go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode=\"markers\", marker=dict(size=8)), secondary_y=False, ) fig.update_layout( title=\"Prediction of",
"score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)",
"_get_token() # predict_range = 14 # sir_result = score(token, 'SIR',",
"external_stylesheets=external_stylesheets) app.layout = serve_layout if __name__ == '__main__': app.run_server(debug=(not is_cf_instance),",
"\"R0\"], \"values\": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]] }",
"url.path.split('/')[3] logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG) logger.info('Starting %s server: %s:%d',",
"URL: %s', wml_base_url) logger.info('WML instance ID: %s', wml_instance_id) wml_credentials =",
"logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host,",
"= parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl() wml_instance_id = url.path.split('/')[3] logger.setLevel(logging.INFO if",
"logger.info('WML instance ID: %s', wml_instance_id) wml_credentials = { \"apikey\": wml_api_key,",
"import requests logger = logging.getLogger(__name__) external_stylesheets = [dbc.themes.DARKLY] is_cf_instance =",
"'https://iam.cloud.ibm.com/identity/token' def _get_token(): data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': wml_credentials['apikey']",
"import pandas as pd from datetime import datetime from datetime",
"go.Figure( # data=[ # go.Scatter(x=days, y=sir_result['I'], name='SIR'), # go.Scatter(x=days, y=logistic_result['I'],",
"# go.Scatter(x=days, y=sir_result['I'], name='SIR'), # go.Scatter(x=days, y=logistic_result['I'], name='Logistic'), # ],",
"= _get_token() # predict_range = 14 # sir_result = score(token,",
"requests logger = logging.getLogger(__name__) external_stylesheets = [dbc.themes.DARKLY] is_cf_instance = os.environ.get('CF_INSTANCE_GUID',",
"else: raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text)) n_days = len(result['values'])",
"template=\"plotly_dark\", # height=600 # ) # ) return html.Div(children=[ html.H1(children='COVID-19",
"instance ID: %s', wml_instance_id) wml_credentials = { \"apikey\": wml_api_key, \"instance_id\":",
"confirmed cases for Poland\", template=\"plotly_dark\", height=900 ) fig.update_xaxes(title_text=\"Date\") fig.update_yaxes(title_text=\"Total confirmed",
"wml_scoring_url = os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl() wml_instance_id",
"dash import dash_bootstrap_components as dbc import dash_core_components as dcc import",
"requests.post(iam_token_endpoint, data=data, headers=headers) return response.json()['access_token'] def score(token, algorithm, start_date, country,",
"predict_range, s, i, r): headers = {'Authorization': 'Bearer ' +",
"len(result['values']) index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]",
"'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10) #",
"figure=fig ) ]) app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.layout = serve_layout",
"= os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl() wml_instance_id =",
"headers=headers) return response.json()['access_token'] def score(token, algorithm, start_date, country, predict_range, s,",
"range(n_days)] return pd.DataFrame(result['values'], columns=result['fields'], index=index) def serve_layout(): token = _get_token()",
"22), 'Poland', 40, 10_000, 20, 10) # days = list(sir_result.index)",
"calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000,",
"per day\", secondary_y=True, range=[0, 1000]) # fig = go.Figure( #",
"go from plotly.subplots import make_subplots import logging import json import",
"from datetime import timedelta from urllib import parse import requests",
"y=logistic_result['I'], name='Logistic'), # ], # layout=go.Layout( # title=\"COVID19 infected prediction",
"i in range(n_days)] return pd.DataFrame(result['values'], columns=result['fields'], index=index) def serve_layout(): token",
"\"instance_id\": wml_instance_id, \"url\": wml_base_url, } iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token' def _get_token():",
"predict_range, s, i, r]] } logger.info('Scoring with payload: %s', json.dumps(payload))",
"' + token} payload = { \"fields\": [\"algorithm\", \"start_date\", \"country\",",
"external_stylesheets = [dbc.themes.DARKLY] is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != '' port",
"\"start_date\", \"country\", \"predict_range\", \"S0\", \"I0\", \"R0\"], \"values\": [[algorithm, start_date.strftime('%-m/%-d/%y'), country,",
"pd.DataFrame(result['values'], columns=result['fields'], index=index) def serve_layout(): token = _get_token() # predict_range",
"plotly.subplots import make_subplots import logging import json import os import",
"], # layout=go.Layout( # title=\"COVID19 infected prediction in Poland\", #",
"= dash.Dash(__name__, external_stylesheets=external_stylesheets) app.layout = serve_layout if __name__ == '__main__':",
"os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl() wml_instance_id = url.path.split('/')[3]",
"response = requests.post(iam_token_endpoint, data=data, headers=headers) return response.json()['access_token'] def score(token, algorithm,",
"fig.update_yaxes(title_text=\"Total confirmed cases\", secondary_y=False, range=[0, 6000]) fig.update_yaxes(title_text=\"New cases per day\",",
"is_cf_instance else logging.DEBUG) logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance",
"'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10) calibration_result",
"token} payload = { \"fields\": [\"algorithm\", \"start_date\", \"country\", \"predict_range\", \"S0\",",
"make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5), secondary_y=True, )",
"response.json()['access_token'] def score(token, algorithm, start_date, country, predict_range, s, i, r):",
"score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)",
"calibration_result['Predicted'].shift(1, fill_value=0) fig = make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted",
"import timedelta from urllib import parse import requests logger =",
"in range(n_days)] return pd.DataFrame(result['values'], columns=result['fields'], index=index) def serve_layout(): token =",
"json.dumps(payload)) response = requests.post(wml_scoring_url, json=payload, headers=headers) if response.status_code == 200:",
"dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components as",
"headers = {'Content-Type': 'application/x-www-form-urlencoded'} response = requests.post(iam_token_endpoint, data=data, headers=headers) return",
"import datetime from datetime import timedelta from urllib import parse",
"_get_token(): data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': wml_credentials['apikey'] } headers",
"10_000, 20, 10) # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3,",
"score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)",
"app.layout = serve_layout if __name__ == '__main__': app.run_server(debug=(not is_cf_instance), port=port,",
"r): headers = {'Authorization': 'Bearer ' + token} payload =",
"= 'https://iam.cloud.ibm.com/identity/token' def _get_token(): data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey':",
"for Poland\", template=\"plotly_dark\", height=900 ) fig.update_xaxes(title_text=\"Date\") fig.update_yaxes(title_text=\"Total confirmed cases\", secondary_y=False,",
"url._replace(path='').geturl() wml_instance_id = url.path.split('/')[3] logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG) logger.info('Starting",
"= make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5), secondary_y=True,",
"y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5), secondary_y=True, ) fig.add_trace( go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual",
"name='Predicted Change', opacity=0.5), secondary_y=True, ) fig.add_trace( go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change',",
"10) # logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland',",
"= url._replace(path='').geturl() wml_instance_id = url.path.split('/')[3] logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)",
"calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0) calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)",
"= os.environ['WML_API_KEY'] wml_scoring_url = os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url =",
"os.environ['WML_API_KEY'] wml_scoring_url = os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl()",
"+ timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)] return pd.DataFrame(result['values'], columns=result['fields'], index=index)",
"i, r): headers = {'Authorization': 'Bearer ' + token} payload",
"wml_instance_id) wml_credentials = { \"apikey\": wml_api_key, \"instance_id\": wml_instance_id, \"url\": wml_base_url,",
"import plotly.graph_objects as go from plotly.subplots import make_subplots import logging",
"dash_html_components as html import plotly.graph_objects as go from plotly.subplots import",
"\"url\": wml_base_url, } iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token' def _get_token(): data =",
"y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5), secondary_y=True, ) fig.add_trace( go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),",
"'application/x-www-form-urlencoded'} response = requests.post(iam_token_endpoint, data=data, headers=headers) return response.json()['access_token'] def score(token,",
"20, 10) # days = list(sir_result.index) days = list(calibration_result.index) calibration_result['ActualChange']",
"serve_layout(): token = _get_token() # predict_range = 14 # sir_result",
"confirmed cases\", secondary_y=False, range=[0, 6000]) fig.update_yaxes(title_text=\"New cases per day\", secondary_y=True,",
"- calibration_result['Actual'].shift(1, fill_value=0) calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0) fig",
"else 'local', host, port) logger.info('WML URL: %s', wml_base_url) logger.info('WML instance",
"\"country\", \"predict_range\", \"S0\", \"I0\", \"R0\"], \"values\": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range,",
"\"fields\": [\"algorithm\", \"start_date\", \"country\", \"predict_range\", \"S0\", \"I0\", \"R0\"], \"values\": [[algorithm,",
"Watson Machine Learning'), dcc.Graph( id='example-graph', figure=fig ) ]) app =",
"'' port = int(os.environ.get('PORT', 8050)) host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1') wml_api_key",
"import dash_html_components as html import plotly.graph_objects as go from plotly.subplots",
"height=600 # ) # ) return html.Div(children=[ html.H1(children='COVID-19 Predictions with",
"calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0) fig = make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days,",
"algorithm, start_date, country, predict_range, s, i, r): headers = {'Authorization':",
"= 14 # sir_result = score(token, 'SIR', datetime(2020, 3, 3),",
"secondary_y=False, ) fig.update_layout( title=\"Prediction of confirmed cases for Poland\", template=\"plotly_dark\",",
"predict_range, 10_000, 20, 10) calibration_result = score(token, 'CALIBRATION', datetime(2020, 1,",
"marker=dict(size=8)), secondary_y=False, ) fig.update_layout( title=\"Prediction of confirmed cases for Poland\",",
"'Poland', predict_range, 10_000, 20, 10) # logistic_result = score(token, 'LOGISTIC',",
"# sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range,",
"import dash_bootstrap_components as dbc import dash_core_components as dcc import dash_html_components",
"fig = go.Figure( # data=[ # go.Scatter(x=days, y=sir_result['I'], name='SIR'), #",
"fig = make_subplots(specs=[[{\"secondary_y\": True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),",
"s, i, r): headers = {'Authorization': 'Bearer ' + token}",
"datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10) # logistic_result",
"6000]) fig.update_yaxes(title_text=\"New cases per day\", secondary_y=True, range=[0, 1000]) # fig",
"= [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)] return pd.DataFrame(result['values'],",
"'urn:ibm:params:oauth:grant-type:apikey', 'apikey': wml_credentials['apikey'] } headers = {'Content-Type': 'application/x-www-form-urlencoded'} response =",
"+ token} payload = { \"fields\": [\"algorithm\", \"start_date\", \"country\", \"predict_range\",",
"headers = {'Authorization': 'Bearer ' + token} payload = {",
"datetime from datetime import timedelta from urllib import parse import",
"= list(sir_result.index) days = list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1,",
"= response.json() else: raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text)) n_days",
"'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': wml_credentials['apikey'] } headers = {'Content-Type': 'application/x-www-form-urlencoded'} response",
"os.environ.get('CF_INSTANCE_GUID', '') != '' port = int(os.environ.get('PORT', 8050)) host =",
"cases for Poland\", template=\"plotly_dark\", height=900 ) fig.update_xaxes(title_text=\"Date\") fig.update_yaxes(title_text=\"Total confirmed cases\",",
"Poland\", # template=\"plotly_dark\", # height=600 # ) # ) return",
"= len(result['values']) index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in",
"requests.post(wml_scoring_url, json=payload, headers=headers) if response.status_code == 200: result = response.json()",
"# days = list(sir_result.index) days = list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual']",
"os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1') wml_api_key = os.environ['WML_API_KEY'] wml_scoring_url = os.environ['WML_SCORING_URL'] url =",
"is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != '' port = int(os.environ.get('PORT', 8050))",
"parse.urlparse(wml_scoring_url) wml_base_url = url._replace(path='').geturl() wml_instance_id = url.path.split('/')[3] logger.setLevel(logging.INFO if is_cf_instance",
"prediction in Poland\", # template=\"plotly_dark\", # height=600 # ) #",
"columns=result['fields'], index=index) def serve_layout(): token = _get_token() # predict_range =",
"name='SIR'), # go.Scatter(x=days, y=logistic_result['I'], name='Logistic'), # ], # layout=go.Layout( #",
"iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token' def _get_token(): data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',",
"datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10) calibration_result =",
"response = requests.post(wml_scoring_url, json=payload, headers=headers) if response.status_code == 200: result",
"Machine Learning'), dcc.Graph( id='example-graph', figure=fig ) ]) app = dash.Dash(__name__,",
"= os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1') wml_api_key = os.environ['WML_API_KEY'] wml_scoring_url = os.environ['WML_SCORING_URL'] url",
"True}]]) fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5), secondary_y=True, ) fig.add_trace(",
"datetime import datetime from datetime import timedelta from urllib import",
"sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000,",
"range=[0, 6000]) fig.update_yaxes(title_text=\"New cases per day\", secondary_y=True, range=[0, 1000]) #",
"logging.DEBUG) logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local',",
"with Watson Machine Learning'), dcc.Graph( id='example-graph', figure=fig ) ]) app",
"# ) return html.Div(children=[ html.H1(children='COVID-19 Predictions with Watson Machine Learning'),",
"= score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20,",
"list(sir_result.index) days = list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)",
"dbc import dash_core_components as dcc import dash_html_components as html import",
"html.H1(children='COVID-19 Predictions with Watson Machine Learning'), dcc.Graph( id='example-graph', figure=fig )",
"title=\"COVID19 infected prediction in Poland\", # template=\"plotly_dark\", # height=600 #",
"days = list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0) calibration_result['PredictedChange']",
"logging import json import os import pandas as pd from",
"= serve_layout if __name__ == '__main__': app.run_server(debug=(not is_cf_instance), port=port, host=host)",
"} logger.info('Scoring with payload: %s', json.dumps(payload)) response = requests.post(wml_scoring_url, json=payload,",
"fig.add_trace( go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5), secondary_y=True, ) fig.add_trace( go.Bar(x=days,",
"= os.environ.get('CF_INSTANCE_GUID', '') != '' port = int(os.environ.get('PORT', 8050)) host",
"country, predict_range, s, i, r]] } logger.info('Scoring with payload: %s',",
"index=index) def serve_layout(): token = _get_token() # predict_range = 14",
"of confirmed cases for Poland\", template=\"plotly_dark\", height=900 ) fig.update_xaxes(title_text=\"Date\") fig.update_yaxes(title_text=\"Total",
"calibration_result['Actual'].shift(1, fill_value=0) calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0) fig =",
"# data=[ # go.Scatter(x=days, y=sir_result['I'], name='SIR'), # go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),",
"def score(token, algorithm, start_date, country, predict_range, s, i, r): headers",
"as dcc import dash_html_components as html import plotly.graph_objects as go",
"Exception('Scoring error [{}]: {}'.format(response.status_code, response.text)) n_days = len(result['values']) index =",
"import dash import dash_bootstrap_components as dbc import dash_core_components as dcc",
"wml_api_key = os.environ['WML_API_KEY'] wml_scoring_url = os.environ['WML_SCORING_URL'] url = parse.urlparse(wml_scoring_url) wml_base_url",
") fig.add_trace( go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode=\"markers\", marker=dict(size=8)), secondary_y=False, ) fig.update_layout(",
"is_cf_instance else 'local', host, port) logger.info('WML URL: %s', wml_base_url) logger.info('WML",
"token = _get_token() # predict_range = 14 # sir_result =",
"= list(calibration_result.index) calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0) calibration_result['PredictedChange'] =",
"country, predict_range, s, i, r): headers = {'Authorization': 'Bearer '",
"import json import os import pandas as pd from datetime",
"= { \"fields\": [\"algorithm\", \"start_date\", \"country\", \"predict_range\", \"S0\", \"I0\", \"R0\"],",
"'') != '' port = int(os.environ.get('PORT', 8050)) host = os.environ.get('CF_INSTANCE_INTERNAL_IP',",
"from plotly.subplots import make_subplots import logging import json import os",
"datetime import timedelta from urllib import parse import requests logger",
"%s', wml_instance_id) wml_credentials = { \"apikey\": wml_api_key, \"instance_id\": wml_instance_id, \"url\":",
"= { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'apikey': wml_credentials['apikey'] } headers = {'Content-Type':",
") ]) app = dash.Dash(__name__, external_stylesheets=external_stylesheets) app.layout = serve_layout if",
"raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text)) n_days = len(result['values']) index",
"10_000, 20, 10) calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22),",
"go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5), secondary_y=True, ) fig.add_trace( go.Scatter(x=days, y=calibration_result['Predicted'],"
] |
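The shift(1, fill_value=0) pattern in serve_layout() turns a cumulative case series into day-over-day changes. A minimal self-contained sketch of the same computation (the numbers are hypothetical, not from the app):

import pandas as pd

# Cumulative confirmed cases over four days (made-up values).
cumulative = pd.Series([0, 5, 12, 20], index=['d1', 'd2', 'd3', 'd4'])

# Subtracting the series shifted by one day yields new cases per day;
# fill_value=0 makes the first delta equal the first cumulative value.
daily_change = cumulative - cumulative.shift(1, fill_value=0)
print(daily_change.tolist())  # [0, 5, 7, 8]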
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
"""
"""

from flask_rest_jsonapi import ResourceList, ResourceDetail, ResourceRelationship
from sweetrpg_library_objects.api.system.schema import SystemAPISchema
from sweetrpg_api_core.data import APIData
from sweetrpg_library_objects.model.system import System
from sweetrpg_library_api.application.db import db
from sweetrpg_library_api.application.blueprints.setup import model_info


class SystemList(ResourceList):
    schema = SystemAPISchema
    data_layer = {"class": APIData, "type": "system", "model": System, "db": db, "model_info": model_info}


class SystemDetail(ResourceDetail):
    schema = SystemAPISchema
    data_layer = {
        "class": APIData,
        "type": "system",
        "model": System,
        "db": db,
        "model_info": model_info
    }


# class SystemAuthorRelationship(ResourceRelationship):
#     schema = SystemAPISchema
#     data_layer = {
#         "class": APIData,
#         "type": "system",
#         "model": System,
#         "db": db,
#         "model_info": model_info
#     }
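These resource classes only become reachable once they are registered with a flask_rest_jsonapi Api object. A minimal sketch of that wiring, assuming the standard flask_rest_jsonapi setup; the view names and URL patterns here are illustrative, not taken from the original project:

from flask import Flask
from flask_rest_jsonapi import Api

app = Flask(__name__)
api = Api(app)

# Each route pairs a resource class with a view name and one or more URLs.
api.route(SystemList, 'system_list', '/systems')
api.route(SystemDetail, 'system_detail', '/systems/<id>')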
from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess


class PabotOrderingGroupTest(unittest.TestCase):
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def _run_tests_with(self, testfile, orderfile):
        robot_file = open("{}/test.robot".format(self.tmpdir), "w")
        robot_file.write(textwrap.dedent(testfile))
        robot_file.close()
        with open("{}/order.dat".format(self.tmpdir), "w") as f:
            f.write(textwrap.dedent(orderfile))
        process = subprocess.Popen(
            [
                sys.executable,
                "-m",
                "pabot.pabot",
                "--testlevelsplit",
                "--ordering",
                "{}/order.dat".format(self.tmpdir),
                "{}/test.robot".format(self.tmpdir),
            ],
            cwd=self.tmpdir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return process.communicate()

    def test_orders(self):
        stdout, stderr = self._run_tests_with(
            """
            *** Variables ***
            ${SCALAR}    Hello, globe!

            *** Test Cases ***
            First Test
                Set Suite Variable    ${SCALAR}    Hello, world!

            Second Test
                Should Be Equal    ${SCALAR}    Hello, world!

            Third Test
                Should Be Equal    ${SCALAR}    Hello, globe!
            """,
            """
            {
            --test Test.First Test
            --test Test.Second Test
            }
            --test Test.Third Test
            """,
        )
        if sys.version_info < (3, 0):
            self.assertIn("PASSED", stdout, stderr)
            self.assertNotIn("FAILED", stdout, stderr)
            self.assertEqual(stdout.count("PASSED"), 2)
        else:
            self.assertIn(b"PASSED", stdout, stderr)
            self.assertNotIn(b"FAILED", stdout, stderr)
            self.assertEqual(stdout.count(b"PASSED"), 2)

    def test_two_orders(self):
        stdout, stderr = self._run_tests_with(
            """
            *** Variables ***
            ${SCALAR}    Hello, globe!

            *** Test Cases ***
            First Test
                Set Suite Variable    ${SCALAR}    Hello, world!

            Second Test
                Should Be Equal    ${SCALAR}    Hello, world!

            Second And Quarter
                Should Be Equal    ${SCALAR}    Hello, globe!

            Second And Half
                Should Be Equal    ${SCALAR}    Hello, globe!

            Third Test
                Should Be Equal    ${SCALAR}    Hello, globe!
            """,
            """
            {
            --test Test.First Test
            --test Test.Second Test
            }
            {
            --test Test.Second And Quarter
            --test Test.Second And Half
            }
            --test Test.Third Test
            """,
        )
        if sys.version_info < (3, 0):
            self.assertIn("PASSED", stdout, stderr)
            self.assertNotIn("FAILED", stdout, stderr)
            if ROBOT_VERSION < "4.0":
                expected_write = "5 critical tests, 5 passed, 0 failed"
            else:
                expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
            self.assertIn(expected_write, stdout, stderr)
            self.assertEqual(stdout.count("PASSED"), 3)
        else:
            self.assertIn(b"PASSED", stdout, stderr)
            self.assertNotIn(b"FAILED", stdout, stderr)
            if ROBOT_VERSION < "4.0":
                expected_write = b"5 critical tests, 5 passed, 0 failed"
            else:
                expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
            self.assertIn(expected_write, stdout, stderr)
            self.assertEqual(stdout.count(b"PASSED"), 3)

    def test_too_big_testname(self):
        stdout, stderr = self._run_tests_with(
            """
            *** Test Cases ***
            Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
                Log    Test
            """,
            """
            --test Invalid
            """,
        )
        if sys.version_info < (3, 0):
            self.assertIn("PASSED", stdout, stderr)
            self.assertNotIn("FAILED", stdout, stderr)
            self.assertEqual(stdout.count("PASSED"), 1)
        else:
            self.assertIn(b"PASSED", stdout, stderr)
            self.assertNotIn(b"FAILED", stdout, stderr)
            self.assertEqual(stdout.count(b"PASSED"), 1)

    def test_longnames_in_tests(self):
        stdout, stderr = self._run_tests_with(
            """
            *** Settings ***
            Test Template    Test1

            *** Test Cases ***
            The Somewhat Long Name Of The Test S1Test 01    1
            The Somewhat Long Name Of The Test S1Test 02    1
            The Somewhat Long Name Of The Test S1Test 03    1
            The Somewhat Long Name Of The Test S1Test 04    1
            The Somewhat Long Name Of The Test S1Test 05    1
            The Somewhat Long Name Of The Test S1Test 06    1
            The Somewhat Long Name Of The Test S1Test 07    1
            The Somewhat Long Name Of The Test S1Test 08    1
            The Somewhat Long Name Of The Test S1Test 09    1
            The Somewhat Long Name Of The Test S1Test 10    1
            The Somewhat Long Name Of The Test S1Test 11    1
            The Somewhat Long Name Of The Test S1Test 12    1

            *** Keywords ***
            Test1
                [Arguments]    ${arg}
                Log    Test
            """,
            """
            {
            --test Test.The Somewhat Long Name Of The Test S1Test 01
            --test Test.The Somewhat Long Name Of The Test S1Test 02
            --test Test.The Somewhat Long Name Of The Test S1Test 03
            --test Test.The Somewhat Long Name Of The Test S1Test 04
            --test Test.The Somewhat Long Name Of The Test S1Test 05
            --test Test.The Somewhat Long Name Of The Test S1Test 06
            }
            {
            --test Test.The Somewhat Long Name Of The Test S1Test 07
            --test Test.The Somewhat Long Name Of The Test S1Test 08
            --test Test.The Somewhat Long Name Of The Test S1Test 09
            --test Test.The Somewhat Long Name Of The Test S1Test 10
            --test Test.The Somewhat Long Name Of The Test S1Test 11
            --test Test.The Somewhat Long Name Of The Test S1Test 12
            }
            """,
        )
        if sys.version_info < (3, 0):
            self.assertIn("PASSED", stdout, stderr)
            self.assertNotIn("FAILED", stdout, stderr)
            self.assertEqual(stdout.count("PASSED"), 2)
        else:
            self.assertIn(b"PASSED", stdout, stderr)
            self.assertNotIn(b"FAILED", stdout, stderr)
            self.assertEqual(stdout.count(b"PASSED"), 2)
"passed, 0 failed, 0 skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 3)",
"Hello, globe! Second And Half Should Be Equal ${SCALAR} Hello,",
"Hello, world! Second Test Should Be Equal ${SCALAR} Hello, world!",
"stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 2) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout,",
"ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit",
"Log Test \"\"\", \"\"\" --test Invalid \"\"\", ) if sys.version_info",
"Test Should Be Equal ${SCALAR} Hello, world! Second And Quarter",
"*** ${SCALAR} Hello, globe! *** Test Cases *** First Test",
"1) def test_longnames_in_tests(self): stdout, stderr = self._run_tests_with( \"\"\" *** Settings",
"1 The Somewhat Long Name Of The Test S1Test 03",
"Variable ${SCALAR} Hello, world! Second Test Should Be Equal ${SCALAR}",
"metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.",
"stderr) self.assertEqual(stdout.count(\"PASSED\"), 1) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr)",
"1 The Somewhat Long Name Of The Test S1Test 07",
"\"w\") robot_file.write(textwrap.dedent(testfile)) robot_file.close() with open(\"{}/order.dat\".format(self.tmpdir), \"w\") as f: f.write(textwrap.dedent(orderfile)) process",
"self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 2) def test_two_orders(self): stdout, stderr =",
"skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 3) def test_too_big_testname(self): stdout, stderr",
"S1Test 03 1 The Somewhat Long Name Of The Test",
"Of The Test S1Test 11 1 The Somewhat Long Name",
"test_longnames_in_tests(self): stdout, stderr = self._run_tests_with( \"\"\" *** Settings *** Test",
"The Test S1Test 04 1 The Somewhat Long Name Of",
"Long Name Of The Test S1Test 03 1 The Somewhat",
"09 --test Test.The Somewhat Long Name Of The Test S1Test",
"5 passed, 0 failed, 0 skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"),",
"Long Name Of The Test S1Test 11 --test Test.The Somewhat",
"purus eget orci porta blandit sed ut tortor. Nunc vel",
"01 --test Test.The Somewhat Long Name Of The Test S1Test",
"Should Be Equal ${SCALAR} Hello, globe! Second And Half Should",
"tempfile import textwrap import unittest import shutil import subprocess class",
"Somewhat Long Name Of The Test S1Test 08 1 The",
"S1Test 11 1 The Somewhat Long Name Of The Test",
"tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile, orderfile): robot_file = open(\"{}/test.robot\".format(self.tmpdir), \"w\")",
"Of The Test S1Test 12 } \"\"\", ) if sys.version_info",
"def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self,",
"stderr) if ROBOT_VERSION < \"4.0\": expected_write = b\"5 critical tests,",
"--test Test.Third Test \"\"\", ) if sys.version_info < (3, 0):",
"3) def test_too_big_testname(self): stdout, stderr = self._run_tests_with( \"\"\" *** Test",
"= \"5 tests, 5 passed, 0 failed, 0 skipped.\" self.assertIn(expected_write,",
"ROBOT_VERSION import sys import tempfile import textwrap import unittest import",
"Long Name Of The Test S1Test 02 1 The Somewhat",
"Long Name Of The Test S1Test 07 --test Test.The Somewhat",
"Equal ${SCALAR} Hello, world! Second And Quarter Should Be Equal",
"Of The Test S1Test 01 --test Test.The Somewhat Long Name",
"Test S1Test 06 } { --test Test.The Somewhat Long Name",
"skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 3) else: self.assertIn(b\"PASSED\", stdout, stderr)",
"Keywords *** Test1 [Arguments] ${arg} Log Test \"\"\", \"\"\" {",
"12 } \"\"\", ) if sys.version_info < (3, 0): self.assertIn(\"PASSED\",",
"} { --test Test.The Somewhat Long Name Of The Test",
"Somewhat Long Name Of The Test S1Test 04 --test Test.The",
"0 failed, 0 skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 3) else:",
"Somewhat Long Name Of The Test S1Test 11 --test Test.The",
"Nunc a sodales. Log Test \"\"\", \"\"\" --test Invalid \"\"\",",
"\"\"\" --test Invalid \"\"\", ) if sys.version_info < (3, 0):",
"stdout, stderr = self._run_tests_with( \"\"\" *** Test Cases *** Test",
"velit nunc. Duis eget purus eget orci porta blandit sed",
"S1Test 03 --test Test.The Somewhat Long Name Of The Test",
"Test.The Somewhat Long Name Of The Test S1Test 11 --test",
"Name Of The Test S1Test 03 --test Test.The Somewhat Long",
"def tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile, orderfile): robot_file = open(\"{}/test.robot\".format(self.tmpdir),",
"= tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile, orderfile): robot_file",
"Test.Second And Half } --test Test.Third Test \"\"\", ) if",
"Name Of The Test S1Test 11 1 The Somewhat Long",
"0 failed\" else: expected_write = b\"5 tests, 5 passed, 0",
"Test Should Be Equal ${SCALAR} Hello, globe! \"\"\", \"\"\" {",
"amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget",
"Somewhat Long Name Of The Test S1Test 03 1 The",
"And Half } --test Test.Third Test \"\"\", ) if sys.version_info",
"Test --test Test.Second Test } { --test Test.Second And Quarter",
"--test Test.The Somewhat Long Name Of The Test S1Test 11",
"stdout, stderr) if ROBOT_VERSION < \"4.0\": expected_write = b\"5 critical",
"Long Name Of The Test S1Test 06 1 The Somewhat",
"S1Test 09 --test Test.The Somewhat Long Name Of The Test",
"self._run_tests_with( \"\"\" *** Variables *** ${SCALAR} Hello, globe! *** Test",
"The Test S1Test 05 --test Test.The Somewhat Long Name Of",
"Of The Test S1Test 12 1 *** Keywords *** Test1",
"consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus",
"S1Test 07 --test Test.The Somewhat Long Name Of The Test",
"Test Template Test1 *** Test Cases *** The Somewhat Long",
"Test S1Test 10 1 The Somewhat Long Name Of The",
"Name Of The Test S1Test 01 1 The Somewhat Long",
"ROBOT_VERSION < \"4.0\": expected_write = \"5 critical tests, 5 passed,",
"\"--testlevelsplit\", \"--ordering\", \"{}/order.dat\".format(self.tmpdir), \"{}/test.robot\".format(self.tmpdir), ], cwd=self.tmpdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) return",
"0 failed\" else: expected_write = \"5 tests, 5 passed, 0",
"Name Of The Test S1Test 06 } { --test Test.The",
"*** Test Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
"The Somewhat Long Name Of The Test S1Test 01 1",
"And Half Should Be Equal ${SCALAR} Hello, globe! Third Test",
"The Somewhat Long Name Of The Test S1Test 03 1",
"globe! Third Test Should Be Equal ${SCALAR} Hello, globe! \"\"\",",
"Name Of The Test S1Test 12 1 *** Keywords ***",
"Long Name Of The Test S1Test 12 1 *** Keywords",
"1 The Somewhat Long Name Of The Test S1Test 10",
"Equal ${SCALAR} Hello, globe! \"\"\", \"\"\" { --test Test.First Test",
"Cases *** The Somewhat Long Name Of The Test S1Test",
"S1Test 08 1 The Somewhat Long Name Of The Test",
"Of The Test S1Test 07 1 The Somewhat Long Name",
"\"5 critical tests, 5 passed, 0 failed\" else: expected_write =",
"sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh",
"The Test S1Test 11 1 The Somewhat Long Name Of",
"Test S1Test 01 1 The Somewhat Long Name Of The",
"Template Test1 *** Test Cases *** The Somewhat Long Name",
"Be Equal ${SCALAR} Hello, world! Third Test Should Be Equal",
"ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in,",
"world! Second Test Should Be Equal ${SCALAR} Hello, world! Second",
"def test_orders(self): stdout, stderr = self._run_tests_with( \"\"\" *** Variables ***",
"else: expected_write = b\"5 tests, 5 passed, 0 failed, 0",
"\"4.0\": expected_write = \"5 critical tests, 5 passed, 0 failed\"",
"Test.The Somewhat Long Name Of The Test S1Test 04 --test",
"${SCALAR} Hello, globe! *** Test Cases *** First Test Set",
"Should Be Equal ${SCALAR} Hello, globe! \"\"\", \"\"\" { --test",
"06 1 The Somewhat Long Name Of The Test S1Test",
"0 failed, 0 skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 3) def",
"Somewhat Long Name Of The Test S1Test 08 --test Test.The",
"The Somewhat Long Name Of The Test S1Test 11 1",
"1 The Somewhat Long Name Of The Test S1Test 05",
"stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 3) def test_too_big_testname(self): stdout, stderr = self._run_tests_with(",
"Long Name Of The Test S1Test 10 1 The Somewhat",
"1) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 1)",
"shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile, orderfile): robot_file = open(\"{}/test.robot\".format(self.tmpdir), \"w\") robot_file.write(textwrap.dedent(testfile))",
"Of The Test S1Test 10 --test Test.The Somewhat Long Name",
"Name Of The Test S1Test 07 1 The Somewhat Long",
"Somewhat Long Name Of The Test S1Test 04 1 The",
"{ --test Test.First Test --test Test.Second Test } --test Test.Third",
"\"-m\" \"pabot.pabot\", \"--testlevelsplit\", \"--ordering\", \"{}/order.dat\".format(self.tmpdir), \"{}/test.robot\".format(self.tmpdir), ], cwd=self.tmpdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE,",
"failed\" else: expected_write = \"5 tests, 5 passed, 0 failed,",
"self.assertEqual(stdout.count(\"PASSED\"), 2) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"),",
"--test Invalid \"\"\", ) if sys.version_info < (3, 0): self.assertIn(\"PASSED\",",
"${SCALAR} Hello, globe! \"\"\", \"\"\" { --test Test.First Test --test",
"Third Test Should Be Equal ${SCALAR} Hello, globe! \"\"\", \"\"\"",
"elit. Mauris eu velit nunc. Duis eget purus eget orci",
"Test S1Test 11 1 The Somewhat Long Name Of The",
"The Somewhat Long Name Of The Test S1Test 12 1",
"sed ut tortor. Nunc vel nulla bibendum, auctor sem ac,",
"Somewhat Long Name Of The Test S1Test 09 1 The",
"Name Of The Test S1Test 11 --test Test.The Somewhat Long",
"\"\"\" { --test Test.First Test --test Test.Second Test } --test",
"dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc.",
"robot import __version__ as ROBOT_VERSION import sys import tempfile import",
"stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 1) def test_longnames_in_tests(self): stdout, stderr",
"Name Of The Test S1Test 10 --test Test.The Somewhat Long",
"import sys import tempfile import textwrap import unittest import shutil",
"Somewhat Long Name Of The Test S1Test 10 --test Test.The",
"Test } --test Test.Third Test \"\"\", ) if sys.version_info <",
"world! Second And Quarter Should Be Equal ${SCALAR} Hello, globe!",
"Test S1Test 09 1 The Somewhat Long Name Of The",
"Somewhat Long Name Of The Test S1Test 06 } {",
"Name Of The Test S1Test 05 1 The Somewhat Long",
"Of The Test S1Test 10 1 The Somewhat Long Name",
"Test S1Test 04 --test Test.The Somewhat Long Name Of The",
"failed, 0 skipped.\" self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 3) def test_too_big_testname(self):",
"PabotOrderingGroupTest(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def",
"Half Should Be Equal ${SCALAR} Hello, globe! Third Test Should",
"The Test S1Test 11 --test Test.The Somewhat Long Name Of",
"The Test S1Test 05 1 The Somewhat Long Name Of",
"Of The Test S1Test 03 --test Test.The Somewhat Long Name",
"Name Of The Test S1Test 01 --test Test.The Somewhat Long",
"Test.The Somewhat Long Name Of The Test S1Test 12 }",
"Name Of The Test S1Test 02 1 The Somewhat Long",
"world! Third Test Should Be Equal ${SCALAR} Hello, globe! \"\"\",",
"{ --test Test.Second And Quarter --test Test.Second And Half }",
"Test.Second Test } { --test Test.Second And Quarter --test Test.Second",
"} { --test Test.Second And Quarter --test Test.Second And Half",
"stderr) self.assertEqual(stdout.count(b\"PASSED\"), 2) def test_two_orders(self): stdout, stderr = self._run_tests_with( \"\"\"",
"[ sys.executable, \"-m\" \"pabot.pabot\", \"--testlevelsplit\", \"--ordering\", \"{}/order.dat\".format(self.tmpdir), \"{}/test.robot\".format(self.tmpdir), ], cwd=self.tmpdir,",
"Of The Test S1Test 04 1 The Somewhat Long Name",
"as ROBOT_VERSION import sys import tempfile import textwrap import unittest",
"--test Test.The Somewhat Long Name Of The Test S1Test 06",
"if ROBOT_VERSION < \"4.0\": expected_write = b\"5 critical tests, 5",
"stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 1) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout,",
"The Test S1Test 06 } { --test Test.The Somewhat Long",
"Test.Second And Quarter --test Test.Second And Half } --test Test.Third",
"The Test S1Test 03 1 The Somewhat Long Name Of",
"Equal ${SCALAR} Hello, world! Third Test Should Be Equal ${SCALAR}",
"12 1 *** Keywords *** Test1 [Arguments] ${arg} Log Test",
"Test Cases *** The Somewhat Long Name Of The Test",
"\"\"\" *** Variables *** ${SCALAR} Hello, globe! *** Test Cases",
"< (3, 0): self.assertIn(\"PASSED\", stdout, stderr) self.assertNotIn(\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"),",
"self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) if ROBOT_VERSION < \"4.0\":",
"--test Test.The Somewhat Long Name Of The Test S1Test 03",
"Of The Test S1Test 11 --test Test.The Somewhat Long Name",
"else: expected_write = \"5 tests, 5 passed, 0 failed, 0",
"Second Test Should Be Equal ${SCALAR} Hello, world! Third Test",
"Test S1Test 07 1 The Somewhat Long Name Of The",
"\"--ordering\", \"{}/order.dat\".format(self.tmpdir), \"{}/test.robot\".format(self.tmpdir), ], cwd=self.tmpdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) return process.communicate()",
"expected_write = b\"5 critical tests, 5 passed, 0 failed\" else:",
"stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 3) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout,",
"self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile, orderfile):",
"stderr) self.assertEqual(stdout.count(b\"PASSED\"), 3) def test_too_big_testname(self): stdout, stderr = self._run_tests_with( \"\"\"",
"auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit",
"06 } { --test Test.The Somewhat Long Name Of The",
"stderr = self._run_tests_with( \"\"\" *** Settings *** Test Template Test1",
"Suite Variable ${SCALAR} Hello, world! Second Test Should Be Equal",
"${arg} Log Test \"\"\", \"\"\" { --test Test.The Somewhat Long",
"11 1 The Somewhat Long Name Of The Test S1Test",
"The Test S1Test 07 --test Test.The Somewhat Long Name Of",
"5 passed, 0 failed\" else: expected_write = b\"5 tests, 5",
"S1Test 07 1 The Somewhat Long Name Of The Test",
"Settings *** Test Template Test1 *** Test Cases *** The",
"03 --test Test.The Somewhat Long Name Of The Test S1Test",
"The Test S1Test 02 1 The Somewhat Long Name Of",
"Test S1Test 11 --test Test.The Somewhat Long Name Of The",
"The Test S1Test 08 1 The Somewhat Long Name Of",
"Somewhat Long Name Of The Test S1Test 12 } \"\"\",",
"*** First Test Set Suite Variable ${SCALAR} Hello, world! Second",
"ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie",
"*** The Somewhat Long Name Of The Test S1Test 01",
"Second Test Should Be Equal ${SCALAR} Hello, world! Second And",
"The Test S1Test 12 1 *** Keywords *** Test1 [Arguments]",
"${SCALAR} Hello, globe! Third Test Should Be Equal ${SCALAR} Hello,",
"_run_tests_with(self, testfile, orderfile): robot_file = open(\"{}/test.robot\".format(self.tmpdir), \"w\") robot_file.write(textwrap.dedent(testfile)) robot_file.close() with",
"S1Test 11 --test Test.The Somewhat Long Name Of The Test",
"Test } { --test Test.Second And Quarter --test Test.Second And",
"self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 1) def test_longnames_in_tests(self):",
"S1Test 10 1 The Somewhat Long Name Of The Test",
"--test Test.The Somewhat Long Name Of The Test S1Test 09",
"*** Variables *** ${SCALAR} Hello, globe! *** Test Cases ***",
"Test1 *** Test Cases *** The Somewhat Long Name Of",
"3) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) if ROBOT_VERSION",
"world! Second Test Should Be Equal ${SCALAR} Hello, world! Third",
"*** Test1 [Arguments] ${arg} Log Test \"\"\", \"\"\" { --test",
"{ --test Test.First Test --test Test.Second Test } { --test",
"test_orders(self): stdout, stderr = self._run_tests_with( \"\"\" *** Variables *** ${SCALAR}",
"Somewhat Long Name Of The Test S1Test 07 --test Test.The",
"expected_write = b\"5 tests, 5 passed, 0 failed, 0 skipped.\"",
"The Test S1Test 09 1 The Somewhat Long Name Of",
"blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem",
"sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis",
"Should Be Equal ${SCALAR} Hello, globe! Third Test Should Be",
"test_two_orders(self): stdout, stderr = self._run_tests_with( \"\"\" *** Variables *** ${SCALAR}",
"\"{}/order.dat\".format(self.tmpdir), \"{}/test.robot\".format(self.tmpdir), ], cwd=self.tmpdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) return process.communicate() def",
"Test S1Test 06 1 The Somewhat Long Name Of The",
"Hello, world! Third Test Should Be Equal ${SCALAR} Hello, globe!",
"urna. Nunc a sodales. Log Test \"\"\", \"\"\" --test Invalid",
"Invalid \"\"\", ) if sys.version_info < (3, 0): self.assertIn(\"PASSED\", stdout,",
"Variables *** ${SCALAR} Hello, globe! *** Test Cases *** First",
"Hello, world! Second And Quarter Should Be Equal ${SCALAR} Hello,",
"tests, 5 passed, 0 failed\" else: expected_write = b\"5 tests,",
"< \"4.0\": expected_write = \"5 critical tests, 5 passed, 0",
"Test S1Test 02 --test Test.The Somewhat Long Name Of The",
"Of The Test S1Test 08 --test Test.The Somewhat Long Name",
"05 1 The Somewhat Long Name Of The Test S1Test",
"--test Test.The Somewhat Long Name Of The Test S1Test 05",
"--test Test.First Test --test Test.Second Test } --test Test.Third Test",
"stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 1) def test_longnames_in_tests(self): stdout, stderr = self._run_tests_with(",
"if sys.version_info < (3, 0): self.assertIn(\"PASSED\", stdout, stderr) self.assertNotIn(\"FAILED\", stdout,",
"${SCALAR} Hello, world! Second And Quarter Should Be Equal ${SCALAR}",
"Test.First Test --test Test.Second Test } --test Test.Third Test \"\"\",",
"\"\"\" { --test Test.The Somewhat Long Name Of The Test",
"setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) def _run_tests_with(self, testfile,",
"--test Test.First Test --test Test.Second Test } { --test Test.Second",
"Long Name Of The Test S1Test 04 1 The Somewhat",
"Test Cases *** Test Lorem ipsum dolor sit amet, consectetur",
"08 --test Test.The Somewhat Long Name Of The Test S1Test",
"Somewhat Long Name Of The Test S1Test 09 --test Test.The",
"stderr) self.assertEqual(stdout.count(\"PASSED\"), 3) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr)",
"f.write(textwrap.dedent(orderfile)) process = subprocess.Popen( [ sys.executable, \"-m\" \"pabot.pabot\", \"--testlevelsplit\", \"--ordering\",",
"self.assertIn(\"PASSED\", stdout, stderr) self.assertNotIn(\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 2) else: self.assertIn(b\"PASSED\",",
"${SCALAR} Hello, world! Third Test Should Be Equal ${SCALAR} Hello,",
"sodales. Log Test \"\"\", \"\"\" --test Invalid \"\"\", ) if",
"eget purus eget orci porta blandit sed ut tortor. Nunc",
"stdout, stderr) self.assertNotIn(\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 2) else: self.assertIn(b\"PASSED\", stdout,",
"Name Of The Test S1Test 04 1 The Somewhat Long",
"a sodales. Log Test \"\"\", \"\"\" --test Invalid \"\"\", )",
"Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed",
"The Test S1Test 08 --test Test.The Somewhat Long Name Of",
"= self._run_tests_with( \"\"\" *** Test Cases *** Test Lorem ipsum",
"def test_two_orders(self): stdout, stderr = self._run_tests_with( \"\"\" *** Variables ***",
"Of The Test S1Test 08 1 The Somewhat Long Name",
"], cwd=self.tmpdir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) return process.communicate() def test_orders(self): stdout,",
"Of The Test S1Test 03 1 The Somewhat Long Name",
"__version__ as ROBOT_VERSION import sys import tempfile import textwrap import",
"And Quarter --test Test.Second And Half } --test Test.Third Test",
"open(\"{}/test.robot\".format(self.tmpdir), \"w\") robot_file.write(textwrap.dedent(testfile)) robot_file.close() with open(\"{}/order.dat\".format(self.tmpdir), \"w\") as f: f.write(textwrap.dedent(orderfile))",
"--test Test.The Somewhat Long Name Of The Test S1Test 08",
"self.assertIn(expected_write, stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 3) else: self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\",",
"Half } --test Test.Third Test \"\"\", ) if sys.version_info <",
"Of The Test S1Test 06 1 The Somewhat Long Name",
"Test S1Test 09 --test Test.The Somewhat Long Name Of The",
"orci porta blandit sed ut tortor. Nunc vel nulla bibendum,",
"Set Suite Variable ${SCALAR} Hello, world! Second Test Should Be",
"self.assertIn(\"PASSED\", stdout, stderr) self.assertNotIn(\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(\"PASSED\"), 1) else: self.assertIn(b\"PASSED\",",
"import subprocess class PabotOrderingGroupTest(unittest.TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def",
"Long Name Of The Test S1Test 07 1 The Somewhat",
"\"w\") as f: f.write(textwrap.dedent(orderfile)) process = subprocess.Popen( [ sys.executable, \"-m\"",
"self.assertIn(b\"PASSED\", stdout, stderr) self.assertNotIn(b\"FAILED\", stdout, stderr) self.assertEqual(stdout.count(b\"PASSED\"), 2) def test_two_orders(self):",
"eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a",
"Test S1Test 08 1 The Somewhat Long Name Of The",
"2) def test_two_orders(self): stdout, stderr = self._run_tests_with( \"\"\" *** Variables",
"Of The Test S1Test 05 --test Test.The Somewhat Long Name",
"stdout, stderr = self._run_tests_with( \"\"\" *** Variables *** ${SCALAR} Hello,",
"stdout, stderr = self._run_tests_with( \"\"\" *** Settings *** Test Template",
"Test.The Somewhat Long Name Of The Test S1Test 05 --test",
"Somewhat Long Name Of The Test S1Test 02 1 The",
"self.assertNotIn(b\"FAILED\", stdout, stderr) if ROBOT_VERSION < \"4.0\": expected_write = b\"5",
"Test S1Test 02 1 The Somewhat Long Name Of The",
"Somewhat Long Name Of The Test S1Test 01 --test Test.The",
"5 passed, 0 failed\" else: expected_write = \"5 tests, 5"
] |
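The helper above pushes its triple-quoted Robot Framework data through textwrap.dedent, which is what lets the test data stay indented to match the surrounding Python while the written .robot file starts at column zero. A minimal standard-library illustration of that behavior (the suite content below is made up for the demo):

import textwrap

raw = """
    *** Test Cases ***
    Example Test
        Log    message
"""
# dedent() strips the whitespace prefix common to all non-blank lines,
# so the resulting text begins at column zero as Robot Framework expects.
print(textwrap.dedent(raw))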
#!/usr/bin/python
# -*- coding: latin-1 -*-
# Commonists/pageview-api: setup.py

"""Setup script."""

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    import pageviewapi
    version = pageviewapi.__version__
except ImportError:
    version = 'Undefined'

classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Utilities'
]

packages = ['pageviewapi']
requires = ['requests', 'attrdict']

setup(
    name='pageviewapi',
    version=version,
    author='Commonists',
    author_email='<EMAIL>',
    url='http://github.com/Commonists/pageview-api',
    description='Wikimedia Pageview API client',
    long_description=open('README.md').read(),
    license='MIT',
    packages=packages,
    install_requires=requires,
    classifiers=classifiers
)
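The script reads the version straight off the package and falls back to the string 'Undefined' when the package cannot be imported at build time. A quick hedged check that an installed copy reports a real version (it assumes the package has already been installed, for example with pip install . in the repository root):

# Sanity check after installation; assumes pageviewapi is importable.
import pageviewapi

# __version__ is the same attribute setup.py reads to populate `version`.
assert pageviewapi.__version__ != 'Undefined'
print(pageviewapi.__version__)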
# juby-gif/assignment1 (a2_t1b.py)
# This program converts Celsius to Kelvin and Fahrenheit to Celsius.

def c_to_k(c):
    k = c + 273.15  # Formula to convert Celsius to Kelvin
    return k

def f_to_c(f):
    fa = (f - 32) * 5 / 9  # Formula to convert Fahrenheit to Celsius
    return fa

c = 25.0
f = 100.0
k = c_to_k(c)
fa = f_to_c(f)
print("Celsius of " + str(c) + " is " + str(k) + " in Kelvin")
print("Fahrenheit of " + str(f) + " is " + str(fa) + " in Celsius")
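The two formulas are easy to sanity-check against known reference points: 25 C is exactly 298.15 K, and 100 F is about 37.78 C. A small self-contained check (the converter bodies simply mirror the ones above):

# Self-contained sanity check; the converters mirror c_to_k/f_to_c above.
def c_to_k(c):
    return c + 273.15

def f_to_c(f):
    return (f - 32) * 5 / 9

assert abs(c_to_k(25.0) - 298.15) < 1e-9   # 25 C = 298.15 K
assert abs(f_to_c(100.0) - 37.7778) < 1e-3  # 100 F is about 37.78 C
print("conversion formulas verified")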
[
"import PermissionDenied from django.core.urlresolvers import reverse, reverse_lazy from django.core.mail import",
"HttpResponse, HttpResponseRedirect from django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy",
"ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS, #",
"'next': reverse_lazy('contact'), }) return initial def form_valid(self, form): # Adding",
"_('Your message has been sent. We\\'ll get back to you",
"if ('message' in self.request.GET): initial.update({ 'message': self.request.GET['message'], }) initial.update({ 'next':",
"login_required from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse, reverse_lazy",
"super(ContactUsView, self).get_initial() # @TODO: This sort of gets repeated in",
"to ensure the user is a wikipedia editor. try: assert",
"is a wikipedia editor. try: assert self.request.user.editor email = form.cleaned_data['email']",
"(AssertionError, AttributeError) as e: messages.add_message (self.request, messages.WARNING, # Translators: This",
"e: messages.add_message (self.request, messages.WARNING, # Translators: This message is shown",
"self.request.user.email, }) if ('message' in self.request.GET): initial.update({ 'message': self.request.GET['message'], })",
"gets repeated in ContactUsForm. # We could probably be factored",
"initial.update({ 'message': self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'), }) return initial",
"to a common place for DRYness. if self.request.user.is_authenticated(): if self.request.user.email:",
"self.request.GET): initial.update({ 'message': self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'), }) return",
"they successfully submit a new message using the contact us",
"soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as e: messages.add_message (self.request,",
"self.request.user.email: initial.update({ 'email': self.request.user.email, }) if ('message' in self.request.GET): initial.update({",
"# Adding an extra check to ensure the user is",
"return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as e: messages.add_message (self.request, messages.WARNING,",
"class ContactUsView(FormView): template_name = 'emails/contact.html' form_class = ContactUsForm success_url =",
"contact us form. _('Your message has been sent. We\\'ll get",
"carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message )",
"initial.update({ 'email': self.request.user.email, }) if ('message' in self.request.GET): initial.update({ 'message':",
"method_decorator from django.utils.translation import ugettext_lazy as _ from django.views.generic.edit import",
"ContactUsForm success_url = reverse_lazy('contact') def get_initial(self): initial = super(ContactUsView, self).get_initial()",
"of gets repeated in ContactUsForm. # We could probably be",
"django.core.mail import BadHeaderError, send_mail from django.http import HttpResponse, HttpResponseRedirect from",
"import ugettext_lazy as _ from django.views.generic.edit import FormView from TWLight.emails.forms",
"self).get_initial() # @TODO: This sort of gets repeated in ContactUsForm.",
"# Translators: This message is shown to non-wikipedia editors who",
"ContactUsView(FormView): template_name = 'emails/contact.html' form_class = ContactUsForm success_url = reverse_lazy('contact')",
"ContactUsForm. # We could probably be factored out to a",
"send_mail from django.http import HttpResponse, HttpResponseRedirect from django.utils.decorators import method_decorator",
"This sort of gets repeated in ContactUsForm. # We could",
"_ from django.views.generic.edit import FormView from TWLight.emails.forms import ContactUsForm from",
"ensure the user is a wikipedia editor. try: assert self.request.user.editor",
"reverse_lazy('contact'), }) return initial def form_valid(self, form): # Adding an",
"assert self.request.user.editor email = form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy =",
"from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.core.urlresolvers",
"message = form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy,",
"Translators: This message is shown to non-wikipedia editors who attempt",
"ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView): template_name = 'emails/contact.html' form_class =",
"form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email,",
"get_initial(self): initial = super(ContactUsView, self).get_initial() # @TODO: This sort of",
"from TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView): template_name =",
"from TWLight.emails.forms import ContactUsForm from TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post')",
"DRYness. if self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email': self.request.user.email, }) if",
"email = form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send(",
"ContactUsForm from TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView): template_name",
") messages.add_message(self.request, messages.SUCCESS, # Translators: Shown to users when they",
"has been sent. We\\'ll get back to you soon!')) return",
"form_valid(self, form): # Adding an extra check to ensure the",
"shown to non-wikipedia editors who attempt to post data to",
"import FormView from TWLight.emails.forms import ContactUsForm from TWLight.emails.signals import ContactUs",
"= 'emails/contact.html' form_class = ContactUsForm success_url = reverse_lazy('contact') def get_initial(self):",
"import method_decorator from django.utils.translation import ugettext_lazy as _ from django.views.generic.edit",
"could probably be factored out to a common place for",
"post data to the contact us form. _('You must be",
"attempt to post data to the contact us form. _('You",
"message has been sent. We\\'ll get back to you soon!'))",
"success_url = reverse_lazy('contact') def get_initial(self): initial = super(ContactUsView, self).get_initial() #",
"_('You must be a Wikipedia editor to do that.')) raise",
"from django.views.generic.edit import FormView from TWLight.emails.forms import ContactUsForm from TWLight.emails.signals",
"contact us form. _('You must be a Wikipedia editor to",
"self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'), }) return initial def form_valid(self,",
"= form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username,",
"the contact us form. _('You must be a Wikipedia editor",
"return initial def form_valid(self, form): # Adding an extra check",
"you soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as e: messages.add_message",
"who attempt to post data to the contact us form.",
"form): # Adding an extra check to ensure the user",
"django.utils.translation import ugettext_lazy as _ from django.views.generic.edit import FormView from",
"place for DRYness. if self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email': self.request.user.email,",
"a Wikipedia editor to do that.')) raise PermissionDenied return self.request.user.editor",
"def form_valid(self, form): # Adding an extra check to ensure",
"@TODO: This sort of gets repeated in ContactUsForm. # We",
"form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS,",
"from django.core.mail import BadHeaderError, send_mail from django.http import HttpResponse, HttpResponseRedirect",
"'email': self.request.user.email, }) if ('message' in self.request.GET): initial.update({ 'message': self.request.GET['message'],",
"# We could probably be factored out to a common",
"django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.core.urlresolvers import",
"import ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView): template_name = 'emails/contact.html' form_class",
"the user is a wikipedia editor. try: assert self.request.user.editor email",
"sent. We\\'ll get back to you soon!')) return HttpResponseRedirect(reverse('contact')) except",
"in self.request.GET): initial.update({ 'message': self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'), })",
"for DRYness. if self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email': self.request.user.email, })",
"us form. _('You must be a Wikipedia editor to do",
"django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy as _ from",
"import login_required from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse,",
"TWLight.emails.forms import ContactUsForm from TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post') class",
"self.request.user.editor email = form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc']",
"}) initial.update({ 'next': reverse_lazy('contact'), }) return initial def form_valid(self, form):",
"get back to you soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError)",
"# Translators: Shown to users when they successfully submit a",
"editor. try: assert self.request.user.editor email = form.cleaned_data['email'] message = form.cleaned_data['message']",
"= super(ContactUsView, self).get_initial() # @TODO: This sort of gets repeated",
"form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message",
"reverse_lazy('contact') def get_initial(self): initial = super(ContactUsView, self).get_initial() # @TODO: This",
"in ContactUsForm. # We could probably be factored out to",
"from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.exceptions",
"cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS, # Translators: Shown to",
"import HttpResponse, HttpResponseRedirect from django.utils.decorators import method_decorator from django.utils.translation import",
"form. _('Your message has been sent. We\\'ll get back to",
"# @TODO: This sort of gets repeated in ContactUsForm. #",
"from django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse, reverse_lazy from",
"FormView from TWLight.emails.forms import ContactUsForm from TWLight.emails.signals import ContactUs @method_decorator(login_required,",
"sort of gets repeated in ContactUsForm. # We could probably",
"from django.http import HttpResponse, HttpResponseRedirect from django.utils.decorators import method_decorator from",
"as e: messages.add_message (self.request, messages.WARNING, # Translators: This message is",
"check to ensure the user is a wikipedia editor. try:",
"We\\'ll get back to you soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError,",
"if self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email': self.request.user.email, }) if ('message'",
"@method_decorator(login_required, name='post') class ContactUsView(FormView): template_name = 'emails/contact.html' form_class = ContactUsForm",
"initial.update({ 'next': reverse_lazy('contact'), }) return initial def form_valid(self, form): #",
"messages.SUCCESS, # Translators: Shown to users when they successfully submit",
"editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS, # Translators: Shown to users",
"repeated in ContactUsForm. # We could probably be factored out",
"must be a Wikipedia editor to do that.')) raise PermissionDenied",
"PermissionDenied from django.core.urlresolvers import reverse, reverse_lazy from django.core.mail import BadHeaderError,",
"initial def form_valid(self, form): # Adding an extra check to",
"django.core.urlresolvers import reverse, reverse_lazy from django.core.mail import BadHeaderError, send_mail from",
"try: assert self.request.user.editor email = form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy",
"template_name = 'emails/contact.html' form_class = ContactUsForm success_url = reverse_lazy('contact') def",
"to you soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as e:",
"from django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy as _",
"This message is shown to non-wikipedia editors who attempt to",
"out to a common place for DRYness. if self.request.user.is_authenticated(): if",
"<filename>TWLight/emails/views.py from django.contrib import messages from django.contrib.auth.decorators import login_required from",
"to the contact us form. _('You must be a Wikipedia",
"HttpResponseRedirect from django.utils.decorators import method_decorator from django.utils.translation import ugettext_lazy as",
"= reverse_lazy('contact') def get_initial(self): initial = super(ContactUsView, self).get_initial() # @TODO:",
"new message using the contact us form. _('Your message has",
"import ContactUsForm from TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView):",
"except (AssertionError, AttributeError) as e: messages.add_message (self.request, messages.WARNING, # Translators:",
"form_class = ContactUsForm success_url = reverse_lazy('contact') def get_initial(self): initial =",
"using the contact us form. _('Your message has been sent.",
"non-wikipedia editors who attempt to post data to the contact",
"body=message ) messages.add_message(self.request, messages.SUCCESS, # Translators: Shown to users when",
"We could probably be factored out to a common place",
"form. _('You must be a Wikipedia editor to do that.'))",
"django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.exceptions import",
"import reverse, reverse_lazy from django.core.mail import BadHeaderError, send_mail from django.http",
"editors who attempt to post data to the contact us",
"extra check to ensure the user is a wikipedia editor.",
"to users when they successfully submit a new message using",
"message is shown to non-wikipedia editors who attempt to post",
"common place for DRYness. if self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email':",
"'emails/contact.html' form_class = ContactUsForm success_url = reverse_lazy('contact') def get_initial(self): initial",
"when they successfully submit a new message using the contact",
"as _ from django.views.generic.edit import FormView from TWLight.emails.forms import ContactUsForm",
"'message': self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'), }) return initial def",
"messages.add_message(self.request, messages.SUCCESS, # Translators: Shown to users when they successfully",
"Shown to users when they successfully submit a new message",
"django.core.exceptions import PermissionDenied from django.core.urlresolvers import reverse, reverse_lazy from django.core.mail",
"Adding an extra check to ensure the user is a",
"is shown to non-wikipedia editors who attempt to post data",
"Translators: Shown to users when they successfully submit a new",
"been sent. We\\'ll get back to you soon!')) return HttpResponseRedirect(reverse('contact'))",
"successfully submit a new message using the contact us form.",
"import BadHeaderError, send_mail from django.http import HttpResponse, HttpResponseRedirect from django.utils.decorators",
"= form.cleaned_data['email'] message = form.cleaned_data['message'] carbon_copy = form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__,",
"HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as e: messages.add_message (self.request, messages.WARNING, #",
"messages from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from",
"if self.request.user.email: initial.update({ 'email': self.request.user.email, }) if ('message' in self.request.GET):",
"submit a new message using the contact us form. _('Your",
"message using the contact us form. _('Your message has been",
"factored out to a common place for DRYness. if self.request.user.is_authenticated():",
"a wikipedia editor. try: assert self.request.user.editor email = form.cleaned_data['email'] message",
"messages.WARNING, # Translators: This message is shown to non-wikipedia editors",
"users when they successfully submit a new message using the",
"from django.core.urlresolvers import reverse, reverse_lazy from django.core.mail import BadHeaderError, send_mail",
"name='post') class ContactUsView(FormView): template_name = 'emails/contact.html' form_class = ContactUsForm success_url",
"ugettext_lazy as _ from django.views.generic.edit import FormView from TWLight.emails.forms import",
"django.views.generic.edit import FormView from TWLight.emails.forms import ContactUsForm from TWLight.emails.signals import",
"probably be factored out to a common place for DRYness.",
"BadHeaderError, send_mail from django.http import HttpResponse, HttpResponseRedirect from django.utils.decorators import",
"self.request.user.is_authenticated(): if self.request.user.email: initial.update({ 'email': self.request.user.email, }) if ('message' in",
"be a Wikipedia editor to do that.')) raise PermissionDenied return",
"to non-wikipedia editors who attempt to post data to the",
"}) return initial def form_valid(self, form): # Adding an extra",
"an extra check to ensure the user is a wikipedia",
"from django.utils.translation import ugettext_lazy as _ from django.views.generic.edit import FormView",
"def get_initial(self): initial = super(ContactUsView, self).get_initial() # @TODO: This sort",
"import messages from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied",
"initial = super(ContactUsView, self).get_initial() # @TODO: This sort of gets",
"('message' in self.request.GET): initial.update({ 'message': self.request.GET['message'], }) initial.update({ 'next': reverse_lazy('contact'),",
"back to you soon!')) return HttpResponseRedirect(reverse('contact')) except (AssertionError, AttributeError) as",
"user is a wikipedia editor. try: assert self.request.user.editor email =",
"= form.cleaned_data['cc'] ContactUs.new_email.send( sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request,",
"a common place for DRYness. if self.request.user.is_authenticated(): if self.request.user.email: initial.update({",
"us form. _('Your message has been sent. We\\'ll get back",
"messages.add_message (self.request, messages.WARNING, # Translators: This message is shown to",
"AttributeError) as e: messages.add_message (self.request, messages.WARNING, # Translators: This message",
"= ContactUsForm success_url = reverse_lazy('contact') def get_initial(self): initial = super(ContactUsView,",
"user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS, # Translators: Shown",
"reverse_lazy from django.core.mail import BadHeaderError, send_mail from django.http import HttpResponse,",
"a new message using the contact us form. _('Your message",
"(self.request, messages.WARNING, # Translators: This message is shown to non-wikipedia",
"}) if ('message' in self.request.GET): initial.update({ 'message': self.request.GET['message'], }) initial.update({",
"sender=self.__class__, user_email=email, cc=carbon_copy, editor_wp_username=self.request.user.editor.wp_username, body=message ) messages.add_message(self.request, messages.SUCCESS, # Translators:",
"to post data to the contact us form. _('You must",
"reverse, reverse_lazy from django.core.mail import BadHeaderError, send_mail from django.http import",
"be factored out to a common place for DRYness. if",
"wikipedia editor. try: assert self.request.user.editor email = form.cleaned_data['email'] message =",
"TWLight.emails.signals import ContactUs @method_decorator(login_required, name='post') class ContactUsView(FormView): template_name = 'emails/contact.html'",
"data to the contact us form. _('You must be a",
"the contact us form. _('Your message has been sent. We\\'ll",
"django.http import HttpResponse, HttpResponseRedirect from django.utils.decorators import method_decorator from django.utils.translation"
] |
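Because the `post` handler is wrapped in `login_required` and `form_valid` then asserts `request.user.editor`, anonymous users are bounced to login and logged-in non-editors get the warning above followed by `PermissionDenied`. A minimal sketch of the success path using Django's test client; the `/contact/` URL path and the `editor_user` fixture are assumptions for illustration, not part of the excerpt:

from django.test import Client

client = Client()
client.force_login(editor_user)        # hypothetical fixture: a user with a linked editor profile
response = client.post('/contact/', {  # assumed URL; the view itself only reverses the 'contact' name
    'email': 'editor@example.com',
    'message': 'Hello!',
    'cc': '',
})
assert response.status_code == 302     # the success branch redirects back to 'contact'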
# frontend/config.py
"""Flask App configuration file."""
import logging
import os

import dotenv

import frontend.constants as constants

dotenv.load_dotenv(os.path.join(constants.BASEDIR, "frontend.env"))


class Base:
    """Configuration class used as base for all environments."""
    DEBUG = False
    TESTING = False
    LOGGING_FORMAT = "[%(asctime)s] %(levelname)s in %(message)s"
    LOGGING_LOCATION = "frontend.log"
    LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", logging.DEBUG)


class Development(Base):
    """Configuration class for the development environment.

    Parameters
    ----------
    Base: base configuration object.
    """
    DEBUG = True
    TESTING = False
    ENV = "dev"


class Staging(Base):
    """Configuration class for the staging environment.

    Parameters
    ----------
    Base: base configuration object.
    """
    DEBUG = False
    TESTING = True
    ENV = "staging"


class Production(Base):
    """Configuration class for the production environment.

    Parameters
    ----------
    Base: base configuration object.
    """
    DEBUG = False
    TESTING = False
    ENV = "prod"


config = {
    "development": "frontend.config.Development",
    "staging": "frontend.config.Staging",
    "production": "frontend.config.Production",
    "default": "frontend.config.Development",
}


def configure_app(app):
    """Configures the Flask app according to the FLASK_ENV envar.

    In case FLASK_ENV is not defined, then use the 'default' configuration.

    Parameters
    ----------
    app: flask.Flask
        Flask app module.
    """
    # Configure app
    config_name = os.environ.get("FLASK_ENV", "default")
    app.config.from_object(config[config_name])

    # Configure logging
    handler = logging.FileHandler(app.config["LOGGING_LOCATION"])
    handler.setLevel(app.config["LOGGING_LEVEL"])
    formatter = logging.Formatter(app.config["LOGGING_FORMAT"])
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
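For context, `configure_app` is the hook application setup code calls after constructing the app. A minimal usage sketch; the choice of "staging" here is illustrative, any key of the `config` dict works, and an unset FLASK_ENV falls back to 'default':

import os
from flask import Flask

from frontend.config import configure_app

os.environ.setdefault("FLASK_ENV", "staging")

app = Flask(__name__)
configure_app(app)

# The Staging settings were loaded, and a file handler now feeds frontend.log.
assert app.config["ENV"] == "staging" and app.config["TESTING"]
app.logger.warning("this line ends up in frontend.log")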
# ElasticQuery
# File: tests/test_dsl.py
# Desc: tests for ElasticQuery DSL objects (Filter, Query, Aggregate)

from os import path
from unittest import TestCase

from jsontest import JsonTest

from elasticquery import Query, Aggregate, Suggester
from elasticquery.exceptions import (
    NoQueryError, NoAggregateError, NoSuggesterError, MissingArgError
)
from .util import assert_equal

CLASS_NAMES = {
    '_query': Query
}


def _test_query(self, query, test_name, test_data):
    method = getattr(query, test_name)

    def parse_arg(arg):
        if isinstance(arg, list):
            return [parse_arg(a) for a in arg]
        else:
            return (
                CLASS_NAMES[arg](arg, {})
                if (isinstance(arg, basestring) and arg.startswith('_'))
                else arg
            )

    args = test_data.get('args', [])
    args = parse_arg(args)

    kwargs = test_data.get('kwargs', {})
    kwargs = {
        k: parse_arg(v) if isinstance(v, list) else parse_arg(v)
        for k, v in kwargs.iteritems()
    }

    output = method(*args, **kwargs).dict()
    assert_equal(self, output, test_data['output'])


class TestQueries(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'queries')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Query, test_name, test_data)
    )


class TestAggregates(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'aggregates')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Aggregate, test_name, test_data)
    )


class TestSuggesters(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'suggesters')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Suggester, test_name, test_data)
    )


class TestFails(TestCase):
    def test_no_query(self):
        with self.assertRaises(NoQueryError):
            Query.doesnotexist()

    def test_no_aggregate(self):
        with self.assertRaises(NoAggregateError):
            Aggregate.doesnotexist()

    def test_no_suggester(self):
        with self.assertRaises(NoSuggesterError):
            Suggester.doesnotexist()

    def test_missing_arg(self):
        with self.assertRaises(MissingArgError):
            Query.term(None)

    def test_invalid_arg(self):
        # Test passing not a list
        with self.assertRaises(ValueError):
            Query.bool(must=set())

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[None])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[Aggregate.terms('test', 'test')])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.range('field', gte=['error'])

        # Empty list should be OK/ignored
        Query.bool(must=[])
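The three JsonTest-driven classes generate one test method per JSON file under their `jsontest_files` directory, and `_test_query` replays each file's args/kwargs against the DSL class and compares the serialised output. To make that shape concrete, here is a hypothetical case expressed directly in Python; the field name, value, and expected serialisation are illustrative guesses, not fixtures copied from the ElasticQuery repository:

from elasticquery import Query

test_data = {
    'args': ['status', 'published'],               # positional args for Query.term
    'kwargs': {},
    'output': {'term': {'status': 'published'}},   # assumed .dict() serialisation
}

output = Query.term(*test_data['args'], **test_data['kwargs']).dict()
assert output == test_data['output']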
import torch

ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'

states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()

for key in states_dict.keys():
    if "running_var" in key or "running_mean" in key:
        del states_dict_new[key]

torch.save(states_dict_new, save_path)
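The copy-then-delete dance exists only so the loop never deletes from the dict it is iterating over. The same BatchNorm-statistics stripping can be written as a single dict comprehension; an equivalent sketch using the same checkpoint paths:

import torch

ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'

states_dict = torch.load(ckp_path)
# Keep every entry except the BatchNorm running statistics.
filtered = {key: value for key, value in states_dict.items()
            if "running_var" not in key and "running_mean" not in key}
torch.save(filtered, save_path)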
# Copyright Preferred Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
import rospy
import actionlib
from geometry_msgs.msg import Twist, Vector3
from apc2016.msg import *


class DummyArmControl(object):
    def __init__(self):
        self.srv_lowlevel_left = \
            actionlib.SimpleActionServer('move_to_left',
                                         RobotArmMoveAction,
                                         execute_cb=self.cb_move_to_left,
                                         auto_start=False)
        self.srv_highlevel_left = \
            actionlib.SimpleActionServer('move_to_bin_left',
                                         BinToteMoveAction,
                                         execute_cb=self.cb_move_to_bin_left,
                                         auto_start=False)
        self.srv_lowlevel_right = \
            actionlib.SimpleActionServer('move_to_right',
                                         RobotArmMoveAction,
                                         execute_cb=self.cb_move_to_right,
                                         auto_start=False)
        self.srv_highlevel_right = \
            actionlib.SimpleActionServer('move_to_bin_right',
                                         BinToteMoveAction,
                                         execute_cb=self.cb_move_to_bin_right,
                                         auto_start=False)
        self.srv_lowlevel_left.start()
        self.srv_highlevel_left.start()
        self.srv_lowlevel_right.start()
        self.srv_highlevel_right.start()

    def cb_move_to_left(self, goal):
        print "moving away right arm, then moving left arm:"
        print goal.target_position
        result = RobotArmMoveResult(success=True,
                                    position=goal.target_position)
        self.srv_lowlevel_left.set_succeeded(result)

    def cb_move_to_bin_left(self, goal):
        if goal.position:
            pos = goal.position
        else:
            pos = "photo"
        print "looking up position for %s/%s" % (goal.bin, pos)
        pos = numpy.asarray([550, -146, 752, 181, 0, 180])
        p = Vector3(pos[0], pos[1], pos[2])
        r = Vector3(pos[3], pos[4], pos[5])
        print "moving away right arm, then moving left arm"
        result = BinToteMoveResult(success=True, position=Twist(p, r))
        self.srv_highlevel_left.set_succeeded(result)

    def cb_move_to_right(self, goal):
        print "moving away left arm, then moving right arm:"
        print goal.target_position
        result = RobotArmMoveResult(success=True,
                                    position=goal.target_position)
        self.srv_lowlevel_right.set_succeeded(result)

    def cb_move_to_bin_right(self, goal):
        if goal.position:
            pos = goal.position
        else:
            pos = "photo"
        print "looking up position for %s/%s" % (goal.bin, pos)
        pos = numpy.asarray([550, -146, 752, 184, 0, 180])
        p = Vector3(pos[0], pos[1], pos[2])
        r = Vector3(pos[3], pos[4], pos[5])
        print "moving away left arm, then moving right arm"
        result = BinToteMoveResult(success=True, position=Twist(p, r))
        self.srv_highlevel_right.set_succeeded(result)


if __name__ == '__main__':
    rospy.init_node("arm_control_dummy",
"184, 0, 180]) p = Vector3(pos[0], pos[1], pos[2]) r =",
"(the \"License\"); # you may not use this file except",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"# you may not use this file except in compliance",
"r)) self.srv_highlevel_right.set_succeeded(result) if __name__ == '__main__': rospy.init_node(\"arm_control_dummy\", anonymous=True) DummyArmControl() rospy.spin()",
"either express or implied. # See the License for the",
"arm\" result = BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_left.set_succeeded(result) def cb_move_to_right(self, goal):",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"pos[4], pos[5]) print \"moving away left arm, then moving right",
"BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_right.set_succeeded(result) if __name__ == '__main__': rospy.init_node(\"arm_control_dummy\", anonymous=True)",
"arm\" result = BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_right.set_succeeded(result) if __name__ ==",
"pos[2]) r = Vector3(pos[3], pos[4], pos[5]) print \"moving away left",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"\"moving away left arm, then moving right arm:\" print goal.target_position",
"print goal.target_position result = RobotArmMoveResult(success=True, position=goal.target_position) self.srv_lowlevel_left.set_succeeded(result) def cb_move_to_bin_left(self, goal):",
"the License is distributed on an \"AS IS\" BASIS, #",
"auto_start=False) self.srv_lowlevel_left.start() self.srv_highlevel_left.start() self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def cb_move_to_left(self, goal): print \"moving",
"def cb_move_to_bin_left(self, goal): if goal.position: pos = goal.position else: pos",
"pos[5]) print \"moving away right arm, then moving left arm\"",
"* class DummyArmControl(object): def __init__(self): self.srv_lowlevel_left = \\ actionlib.SimpleActionServer('move_to_left', RobotArmMoveAction,",
"= Vector3(pos[3], pos[4], pos[5]) print \"moving away left arm, then",
"in compliance with the License. # You may obtain a",
"= \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right = \\ actionlib.SimpleActionServer('move_to_right',",
"Copyright 2016 Preferred Networks, Inc. # # Licensed under the",
"software # distributed under the License is distributed on an",
"geometry_msgs.msg import Twist, Vector3 from apc2016.msg import * class DummyArmControl(object):",
"\\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right = \\ actionlib.SimpleActionServer('move_to_right', RobotArmMoveAction,",
"auto_start=False) self.srv_lowlevel_right = \\ actionlib.SimpleActionServer('move_to_right', RobotArmMoveAction, execute_cb=self.cb_move_to_right, auto_start=False) self.srv_highlevel_right =",
"self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def cb_move_to_left(self, goal): print \"moving away right arm,",
"\"moving away right arm, then moving left arm:\" print goal.target_position",
"governing permissions and # limitations under the License. import numpy",
"up position for %s/%s\" % (goal.bin, pos) pos = numpy.asarray([550,",
"= \\ actionlib.SimpleActionServer('move_to_right', RobotArmMoveAction, execute_cb=self.cb_move_to_right, auto_start=False) self.srv_highlevel_right = \\ actionlib.SimpleActionServer('move_to_bin_right',",
"right arm, then moving left arm\" result = BinToteMoveResult(success=True, position=Twist(p,",
"# # Unless required by applicable law or agreed to",
"181, 0, 180]) p = Vector3(pos[0], pos[1], pos[2]) r =",
"self.srv_lowlevel_right.set_succeeded(result) def cb_move_to_bin_right(self, goal): if goal.position: pos = goal.position else:",
"\"moving away right arm, then moving left arm\" result =",
"goal.position else: pos = \"photo\" print \"looking up position for",
"result = RobotArmMoveResult(success=True, position=goal.target_position) self.srv_lowlevel_right.set_succeeded(result) def cb_move_to_bin_right(self, goal): if goal.position:",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"r)) self.srv_highlevel_left.set_succeeded(result) def cb_move_to_right(self, goal): print \"moving away left arm,",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"2016 Preferred Networks, Inc. # # Licensed under the Apache",
"pos = numpy.asarray([550, -146, 752, 181, 0, 180]) p =",
"cb_move_to_right(self, goal): print \"moving away left arm, then moving right",
"cb_move_to_left(self, goal): print \"moving away right arm, then moving left",
"Version 2.0 (the \"License\"); # you may not use this",
"right arm, then moving left arm:\" print goal.target_position result =",
"= numpy.asarray([550, -146, 752, 181, 0, 180]) p = Vector3(pos[0],",
"self.srv_lowlevel_left.start() self.srv_highlevel_left.start() self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def cb_move_to_left(self, goal): print \"moving away",
"License. import numpy import rospy import actionlib from geometry_msgs.msg import",
"\\ actionlib.SimpleActionServer('move_to_left', RobotArmMoveAction, execute_cb=self.cb_move_to_left, auto_start=False) self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction,",
"law or agreed to in writing, software # distributed under",
"right arm\" result = BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_right.set_succeeded(result) if __name__",
"self.srv_highlevel_right.start() def cb_move_to_left(self, goal): print \"moving away right arm, then",
"permissions and # limitations under the License. import numpy import",
"actionlib.SimpleActionServer('move_to_left', RobotArmMoveAction, execute_cb=self.cb_move_to_left, auto_start=False) self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left,",
"pos = numpy.asarray([550, -146, 752, 184, 0, 180]) p =",
"\"moving away left arm, then moving right arm\" result =",
"import rospy import actionlib from geometry_msgs.msg import Twist, Vector3 from",
"implied. # See the License for the specific language governing",
"rospy import actionlib from geometry_msgs.msg import Twist, Vector3 from apc2016.msg",
"BinToteMoveAction, execute_cb=self.cb_move_to_bin_right, auto_start=False) self.srv_lowlevel_left.start() self.srv_highlevel_left.start() self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def cb_move_to_left(self, goal):",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"actionlib.SimpleActionServer('move_to_right', RobotArmMoveAction, execute_cb=self.cb_move_to_right, auto_start=False) self.srv_highlevel_right = \\ actionlib.SimpleActionServer('move_to_bin_right', BinToteMoveAction, execute_cb=self.cb_move_to_bin_right,",
"import * class DummyArmControl(object): def __init__(self): self.srv_lowlevel_left = \\ actionlib.SimpleActionServer('move_to_left',",
"\"License\"); # you may not use this file except in",
"arm, then moving right arm\" result = BinToteMoveResult(success=True, position=Twist(p, r))",
"def cb_move_to_left(self, goal): print \"moving away right arm, then moving",
"from apc2016.msg import * class DummyArmControl(object): def __init__(self): self.srv_lowlevel_left =",
"pos[4], pos[5]) print \"moving away right arm, then moving left",
"execute_cb=self.cb_move_to_right, auto_start=False) self.srv_highlevel_right = \\ actionlib.SimpleActionServer('move_to_bin_right', BinToteMoveAction, execute_cb=self.cb_move_to_bin_right, auto_start=False) self.srv_lowlevel_left.start()",
"print \"moving away left arm, then moving right arm\" result",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"<filename>script/dummy/arm_control.py #!/usr/bin/python # Copyright 2016 Preferred Networks, Inc. # #",
"#!/usr/bin/python # Copyright 2016 Preferred Networks, Inc. # # Licensed",
"then moving left arm:\" print goal.target_position result = RobotArmMoveResult(success=True, position=goal.target_position)",
"= goal.position else: pos = \"photo\" print \"looking up position",
"% (goal.bin, pos) pos = numpy.asarray([550, -146, 752, 184, 0,",
"Vector3(pos[3], pos[4], pos[5]) print \"moving away right arm, then moving",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"auto_start=False) self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right =",
"goal): print \"moving away left arm, then moving right arm:\"",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"pos = \"photo\" print \"looking up position for %s/%s\" %",
"numpy import rospy import actionlib from geometry_msgs.msg import Twist, Vector3",
"180]) p = Vector3(pos[0], pos[1], pos[2]) r = Vector3(pos[3], pos[4],",
"away left arm, then moving right arm\" result = BinToteMoveResult(success=True,",
"RobotArmMoveAction, execute_cb=self.cb_move_to_left, auto_start=False) self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False)",
"arm, then moving right arm:\" print goal.target_position result = RobotArmMoveResult(success=True,",
"Networks, Inc. # # Licensed under the Apache License, Version",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"0, 180]) p = Vector3(pos[0], pos[1], pos[2]) r = Vector3(pos[3],",
"= Vector3(pos[0], pos[1], pos[2]) r = Vector3(pos[3], pos[4], pos[5]) print",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"to in writing, software # distributed under the License is",
"limitations under the License. import numpy import rospy import actionlib",
"goal.position: pos = goal.position else: pos = \"photo\" print \"looking",
"p = Vector3(pos[0], pos[1], pos[2]) r = Vector3(pos[3], pos[4], pos[5])",
"print goal.target_position result = RobotArmMoveResult(success=True, position=goal.target_position) self.srv_lowlevel_right.set_succeeded(result) def cb_move_to_bin_right(self, goal):",
"then moving right arm\" result = BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_right.set_succeeded(result)",
"__init__(self): self.srv_lowlevel_left = \\ actionlib.SimpleActionServer('move_to_left', RobotArmMoveAction, execute_cb=self.cb_move_to_left, auto_start=False) self.srv_highlevel_left =",
"position=Twist(p, r)) self.srv_highlevel_right.set_succeeded(result) if __name__ == '__main__': rospy.init_node(\"arm_control_dummy\", anonymous=True) DummyArmControl()",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"Inc. # # Licensed under the Apache License, Version 2.0",
"# See the License for the specific language governing permissions",
"# limitations under the License. import numpy import rospy import",
"result = BinToteMoveResult(success=True, position=Twist(p, r)) self.srv_highlevel_left.set_succeeded(result) def cb_move_to_right(self, goal): print",
"left arm, then moving right arm\" result = BinToteMoveResult(success=True, position=Twist(p,",
"You may obtain a copy of the License at #",
"# Copyright 2016 Preferred Networks, Inc. # # Licensed under",
"away right arm, then moving left arm\" result = BinToteMoveResult(success=True,",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"arm, then moving left arm:\" print goal.target_position result = RobotArmMoveResult(success=True,",
"arm, then moving left arm\" result = BinToteMoveResult(success=True, position=Twist(p, r))",
"required by applicable law or agreed to in writing, software",
"execute_cb=self.cb_move_to_left, auto_start=False) self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right",
"pos = goal.position else: pos = \"photo\" print \"looking up",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"actionlib.SimpleActionServer('move_to_bin_right', BinToteMoveAction, execute_cb=self.cb_move_to_bin_right, auto_start=False) self.srv_lowlevel_left.start() self.srv_highlevel_left.start() self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def cb_move_to_left(self,",
"\\ actionlib.SimpleActionServer('move_to_bin_right', BinToteMoveAction, execute_cb=self.cb_move_to_bin_right, auto_start=False) self.srv_lowlevel_left.start() self.srv_highlevel_left.start() self.srv_lowlevel_right.start() self.srv_highlevel_right.start() def",
"then moving right arm:\" print goal.target_position result = RobotArmMoveResult(success=True, position=goal.target_position)",
"for %s/%s\" % (goal.bin, pos) pos = numpy.asarray([550, -146, 752,",
"with the License. # You may obtain a copy of",
"this file except in compliance with the License. # You",
"RobotArmMoveResult(success=True, position=goal.target_position) self.srv_lowlevel_left.set_succeeded(result) def cb_move_to_bin_left(self, goal): if goal.position: pos =",
"left arm, then moving right arm:\" print goal.target_position result =",
"else: pos = \"photo\" print \"looking up position for %s/%s\"",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right = \\ actionlib.SimpleActionServer('move_to_right', RobotArmMoveAction, execute_cb=self.cb_move_to_right,",
"Vector3(pos[3], pos[4], pos[5]) print \"moving away left arm, then moving",
"import Twist, Vector3 from apc2016.msg import * class DummyArmControl(object): def",
"moving right arm:\" print goal.target_position result = RobotArmMoveResult(success=True, position=goal.target_position) self.srv_lowlevel_right.set_succeeded(result)",
"%s/%s\" % (goal.bin, pos) pos = numpy.asarray([550, -146, 752, 184,",
"import actionlib from geometry_msgs.msg import Twist, Vector3 from apc2016.msg import",
"self.srv_highlevel_left = \\ actionlib.SimpleActionServer('move_to_bin_left', BinToteMoveAction, execute_cb=self.cb_move_to_bin_left, auto_start=False) self.srv_lowlevel_right = \\",
"goal): if goal.position: pos = goal.position else: pos = \"photo\""
] |
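A minimal client sketch for exercising this dummy server follows. Only the action name 'move_to_left', the action type, and the result fields are taken from the server code above; that RobotArmMoveGoal carries a target_position of type geometry_msgs/Twist is an assumption, as is the client node name.

# Minimal client sketch for the dummy server above.
# Assumption: RobotArmMoveGoal.target_position is a geometry_msgs/Twist.
import rospy
import actionlib
from geometry_msgs.msg import Twist, Vector3
from apc2016.msg import RobotArmMoveAction, RobotArmMoveGoal

if __name__ == '__main__':
    rospy.init_node('arm_control_dummy_client', anonymous=True)
    client = actionlib.SimpleActionClient('move_to_left', RobotArmMoveAction)
    client.wait_for_server()
    goal = RobotArmMoveGoal(target_position=Twist(Vector3(550, -146, 752),
                                                  Vector3(181, 0, 180)))
    client.send_goal(goal)
    client.wait_for_result()
    result = client.get_result()
    print("success: %s" % result.success)  # the dummy always reports True
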
<gh_stars>0
import json

import discord
from utils.time import format_time
from utils import utilities
from discord.ext import commands
from discord import Embed


class Events(commands.Cog):
    """Event Handler for RompDodger"""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        if hasattr(ctx.command, 'on_error'):
            return
        if isinstance(error, (commands.CommandNotFound, commands.NoPrivateMessage)):
            return
        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(embed=await utilities.generate_embed(
                f"Command {ctx.prefix}{ctx.command} requires the **{error.param.name}** argument, but it was not provided"))
        elif isinstance(error, commands.BotMissingPermissions):
            perms = ", ".join(error.missing_perms)
            await ctx.send(embed=await utilities.generate_embed(
                f"To finish the command the bot must have the {perms} permission; give the bot the appropriate permissions and retry"))
        self.bot.logger.critical(f"Ignoring exception in {ctx.command}\nError: {error}")

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        # TODO: implement blacklist system
        self.bot.logger.info(f"Joined {guild} > Total Guilds: {len(self.bot.guilds)}")

    @commands.Cog.listener()
    async def on_guild_remove(self, guild):
        self.bot.logger.info(f"Removed from {guild} > Total Guilds: {len(self.bot.guilds)}")

    @commands.Cog.listener()
    async def on_member_join(self, member):
        cursor = await self.bot.db.execute(
            f"SELECT channel FROM welcomer WHERE guild_id = {member.guild.id}")
        chrow = await cursor.fetchone()
        if chrow is None:
            return
        msgrow = await self.bot.db.execute(
            f"SELECT message FROM welcomer WHERE guild_id = {member.guild.id}")
        msg = await msgrow.fetchone()
        name = member.name
        mention = member.mention
        members = member.guild.member_count
        server = member.guild
        embed = discord.Embed(
            color=discord.Color.dark_green(),
            description=msg[0].format(name=name, mention=mention,
                                      members=members, server=server))
        embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
        created = format_time(member.created_at)
        embed.set_footer(text=f"{member.name} Created on {created}")
        ch = self.bot.get_channel(int(chrow[0]))
        await ch.send(embed=embed)
        await cursor.close()

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        cursor = await self.bot.db.execute(
            f"SELECT channel FROM leaver WHERE guild_id = {member.guild.id}")
        chrow = await cursor.fetchone()
        if chrow is None:
            return
        msgrow = await self.bot.db.execute(
            f"SELECT msg FROM leaver WHERE guild_id = {member.guild.id}")
        msg = await msgrow.fetchone()
        name = member.name
        mention = member.mention
        server = member.guild
        members = member.guild.member_count
        embed = discord.Embed(
            color=discord.Color.dark_green(),
            description=msg[0].format(name=name, mention=mention,
                                      members=members, server=server))
        embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
        joined = format_time(member.joined_at)
        embed.set_footer(text=f"{member.name} Joined on {joined}")
        ch = self.bot.get_channel(int(chrow[0]))
        await ch.send(embed=embed)
        await cursor.close()


def setup(bot):
    bot.add_cog(Events(bot))
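One fragile spot in the cog above is that it interpolates guild_id into SQL with f-strings. A safer sketch, assuming self.bot.db is an aiosqlite connection (which matches the execute/fetchone calls used above), passes parameters separately and folds the two welcomer lookups into one query; fetch_welcomer is a hypothetical helper name, not part of the cog.

import aiosqlite

# Sketch: parameterized lookup of the welcomer config instead of f-string
# interpolation. Table and column names come from the cog above; that the
# connection is aiosqlite is an assumption.
async def fetch_welcomer(db: aiosqlite.Connection, guild_id: int):
    cursor = await db.execute(
        "SELECT channel, message FROM welcomer WHERE guild_id = ?",
        (guild_id,))
    row = await cursor.fetchone()  # None when no welcomer is configured
    await cursor.close()
    return row  # (channel_id, message_template) or None
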
[
"2 == 0: return 2 search_max = int(floor(sqrt(n))) + 1",
"pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for l,f in zip(factors,numbers)) all_factors =",
"number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with the largest",
"[] while n > 1: pf.append(lowest) n //= lowest lowest",
"if n % i == 0: return i return n",
"prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number = max((l,f)",
"[33, 44, 55, 275] def lowest_factor(n, _start=3): if n %",
"55, 275] def lowest_factor(n, _start=3): if n % 2 ==",
"numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with the",
"search_max = int(floor(sqrt(n))) + 1 for i in range(_start, search_max,",
"[ 112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419] # numbers =",
"========== #Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors",
"i == 0: return i return n def prime_factors(n, lowest):",
"numbers:') print('\\n '.join(str(p) for p in numbers)) number, all_factors =",
"concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number",
"= prime_factors(number,low_factor) return number,all_factors if __name__ == '__main__': print('For these",
"//= lowest lowest = lowest_factor(n, max(lowest, 3)) return pf #",
"multiprocessing # ========== #Python3 - concurrent from math import floor,",
"= pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for l,f in zip(factors,numbers)) all_factors",
"lowest = lowest_factor(n, max(lowest, 3)) return pf # ========== #Python3",
"concurrent from math import floor, sqrt numbers = [ 112272537195293,",
"= [ 112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419] # numbers",
"search_max, 2): if n % i == 0: return i",
"int(floor(sqrt(n))) + 1 for i in range(_start, search_max, 2): if",
"p in numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one",
"112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419] # numbers = [33,",
"3)) return pf # ========== #Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers):",
"one with the largest minimum prime factor is {}:'.format(number)) print('",
"max((l,f) for l,f in zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return number,all_factors",
"115280098190773, 115797840077099, 1099726829285419] # numbers = [33, 44, 55, 275]",
"prime factor is {}:'.format(number)) print(' All its prime factors in",
"- concurrent from math import floor, sqrt numbers = [",
"math import floor, sqrt numbers = [ 112272537195293, 112582718962171, 112272537095293,",
"== 0: return 2 search_max = int(floor(sqrt(n))) + 1 for",
"def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number =",
"if n % 2 == 0: return 2 search_max =",
"#Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors =",
"== 0: return i return n def prime_factors(n, lowest): pf",
"275] def lowest_factor(n, _start=3): if n % 2 == 0:",
"numbers = [ 112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419] #",
"__name__ == '__main__': print('For these numbers:') print('\\n '.join(str(p) for p",
"= multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for l,f",
"low_factor,number = max((l,f) for l,f in zip(factors,numbers)) all_factors = prime_factors(number,low_factor)",
"112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419] # numbers = [33, 44,",
"in zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return number,all_factors if __name__ ==",
"0: return i return n def prime_factors(n, lowest): pf =",
"print('For these numbers:') print('\\n '.join(str(p) for p in numbers)) number,",
"in numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with",
"import floor, sqrt numbers = [ 112272537195293, 112582718962171, 112272537095293, 115280098190773,",
"1: pf.append(lowest) n //= lowest lowest = lowest_factor(n, max(lowest, 3))",
"_start=3): if n % 2 == 0: return 2 search_max",
"% i == 0: return i return n def prime_factors(n,",
"# ========== #Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5)",
"lowest_factor(n, _start=3): if n % 2 == 0: return 2",
"from math import floor, sqrt numbers = [ 112272537195293, 112582718962171,",
"lowest_factor(n, max(lowest, 3)) return pf # ========== #Python3 - concurrent",
"factors = pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for l,f in zip(factors,numbers))",
"with the largest minimum prime factor is {}:'.format(number)) print(' All",
"112272537095293, 115280098190773, 115797840077099, 1099726829285419] # numbers = [33, 44, 55,",
"lowest lowest = lowest_factor(n, max(lowest, 3)) return pf # ==========",
"> 1: pf.append(lowest) n //= lowest lowest = lowest_factor(n, max(lowest,",
"= prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with the largest minimum prime",
"number,all_factors if __name__ == '__main__': print('For these numbers:') print('\\n '.join(str(p)",
"= lowest_factor(n, max(lowest, 3)) return pf # ========== #Python3 -",
"numbers = [33, 44, 55, 275] def lowest_factor(n, _start=3): if",
"n def prime_factors(n, lowest): pf = [] while n >",
"floor, sqrt numbers = [ 112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099,",
"all_factors = prime_factors(number,low_factor) return number,all_factors if __name__ == '__main__': print('For",
"import multiprocessing # ========== #Python3 - concurrent from math import",
"- concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool = multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers)",
"pf # ========== #Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool =",
"if __name__ == '__main__': print('For these numbers:') print('\\n '.join(str(p) for",
"= [] while n > 1: pf.append(lowest) n //= lowest",
"= int(floor(sqrt(n))) + 1 for i in range(_start, search_max, 2):",
"zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return number,all_factors if __name__ == '__main__':",
"i in range(_start, search_max, 2): if n % i ==",
"44, 55, 275] def lowest_factor(n, _start=3): if n % 2",
"in range(_start, search_max, 2): if n % i == 0:",
"l,f in zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return number,all_factors if __name__",
"% 2 == 0: return 2 search_max = int(floor(sqrt(n))) +",
"print(' The one with the largest minimum prime factor is",
"115797840077099, 1099726829285419] # numbers = [33, 44, 55, 275] def",
"= [33, 44, 55, 275] def lowest_factor(n, _start=3): if n",
"pf.append(lowest) n //= lowest lowest = lowest_factor(n, max(lowest, 3)) return",
"for l,f in zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return number,all_factors if",
"2 search_max = int(floor(sqrt(n))) + 1 for i in range(_start,",
"the largest minimum prime factor is {}:'.format(number)) print(' All its",
"# numbers = [33, 44, 55, 275] def lowest_factor(n, _start=3):",
"def lowest_factor(n, _start=3): if n % 2 == 0: return",
"for i in range(_start, search_max, 2): if n % i",
"'.join(str(p) for p in numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print('",
"1 for i in range(_start, search_max, 2): if n %",
"range(_start, search_max, 2): if n % i == 0: return",
"multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for l,f in",
"i return n def prime_factors(n, lowest): pf = [] while",
"n //= lowest lowest = lowest_factor(n, max(lowest, 3)) return pf",
"return number,all_factors if __name__ == '__main__': print('For these numbers:') print('\\n",
"factor is {}:'.format(number)) print(' All its prime factors in order",
"for p in numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The",
"+ 1 for i in range(_start, search_max, 2): if n",
"lowest): pf = [] while n > 1: pf.append(lowest) n",
"these numbers:') print('\\n '.join(str(p) for p in numbers)) number, all_factors",
"largest minimum prime factor is {}:'.format(number)) print(' All its prime",
"n > 1: pf.append(lowest) n //= lowest lowest = lowest_factor(n,",
"sqrt numbers = [ 112272537195293, 112582718962171, 112272537095293, 115280098190773, 115797840077099, 1099726829285419]",
"prime_factors(n, lowest): pf = [] while n > 1: pf.append(lowest)",
"prime_factors(number,low_factor) return number,all_factors if __name__ == '__main__': print('For these numbers:')",
"= max((l,f) for l,f in zip(factors,numbers)) all_factors = prime_factors(number,low_factor) return",
"========== #Python3 - concurrent from math import floor, sqrt numbers",
"return i return n def prime_factors(n, lowest): pf = []",
"'__main__': print('For these numbers:') print('\\n '.join(str(p) for p in numbers))",
"pool = multiprocessing.Pool(processes=5) factors = pool.map(lowest_factor,numbers) low_factor,number = max((l,f) for",
"pf = [] while n > 1: pf.append(lowest) n //=",
"# ========== #Python3 - concurrent from math import floor, sqrt",
"return pf # ========== #Python3 - concurrent def prime_factors_of_number_with_lowest_prime_factor(numbers): pool",
"The one with the largest minimum prime factor is {}:'.format(number))",
"{}:'.format(number)) print(' All its prime factors in order are: {}'.format(all_factors))",
"1099726829285419] # numbers = [33, 44, 55, 275] def lowest_factor(n,",
"minimum prime factor is {}:'.format(number)) print(' All its prime factors",
"is {}:'.format(number)) print(' All its prime factors in order are:",
"return 2 search_max = int(floor(sqrt(n))) + 1 for i in",
"max(lowest, 3)) return pf # ========== #Python3 - concurrent def",
"n % i == 0: return i return n def",
"== '__main__': print('For these numbers:') print('\\n '.join(str(p) for p in",
"def prime_factors(n, lowest): pf = [] while n > 1:",
"prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with the largest minimum prime factor",
"#Python3 - concurrent from math import floor, sqrt numbers =",
"return n def prime_factors(n, lowest): pf = [] while n",
"0: return 2 search_max = int(floor(sqrt(n))) + 1 for i",
"print('\\n '.join(str(p) for p in numbers)) number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers)",
"all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers) print(' The one with the largest minimum",
"while n > 1: pf.append(lowest) n //= lowest lowest =",
"2): if n % i == 0: return i return",
"n % 2 == 0: return 2 search_max = int(floor(sqrt(n)))"
] |
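The pool-based fan-out above could also be written with the standard-library concurrent.futures API. The following is a minimal sketch, not part of the original snippet, and it assumes the lowest_factor, prime_factors and numbers definitions reconstructed above are in scope:

from concurrent.futures import ProcessPoolExecutor

def prime_factors_of_number_with_lowest_prime_factor_cf(numbers):
    # Map lowest_factor over the inputs in worker processes, then pick the
    # input whose smallest prime factor is largest, as in the pool version.
    # Like multiprocessing.Pool, this should be called from under an
    # if __name__ == '__main__': guard so worker processes can re-import safely.
    with ProcessPoolExecutor(max_workers=5) as executor:
        factors = list(executor.map(lowest_factor, numbers))
    low_factor, number = max(zip(factors, numbers))
    return number, prime_factors(number, low_factor)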
[
"air pollution using online search log', author='<NAME>(IR Lab)', license='MIT', )",
"setuptools import find_packages, setup setup( name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting",
"name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting the air pollution using online",
"version='0.1.0', description='Project: Nowcasting the air pollution using online search log',",
"setup setup( name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting the air pollution",
"setup( name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting the air pollution using",
"import find_packages, setup setup( name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting the",
"the air pollution using online search log', author='<NAME>(IR Lab)', license='MIT',",
"Nowcasting the air pollution using online search log', author='<NAME>(IR Lab)',",
"packages=find_packages(), version='0.1.0', description='Project: Nowcasting the air pollution using online search",
"description='Project: Nowcasting the air pollution using online search log', author='<NAME>(IR",
"from setuptools import find_packages, setup setup( name='src', packages=find_packages(), version='0.1.0', description='Project:",
"find_packages, setup setup( name='src', packages=find_packages(), version='0.1.0', description='Project: Nowcasting the air"
] |
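Usage note (an assumption about typical workflow, not stated in the original setup.py): a package defined this way is usually installed in editable mode from the project root with pip install -e . so that code changes are picked up without reinstalling.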
[
"... + 1000^1000. \"\"\" def main(): max_digits = 1000 sum",
"def main(): max_digits = 1000 sum = 0 for i",
"1000^1000. \"\"\" def main(): max_digits = 1000 sum = 0",
"Problem 48 Self powers Solved by <NAME> The series, 1^1",
"0 for i in range(1, max_digits+1): sum += i**i print",
"+ 3^3 + ... + 1000^1000. \"\"\" def main(): max_digits",
"+ ... + 1000^1000. \"\"\" def main(): max_digits = 1000",
"1000 sum = 0 for i in range(1, max_digits+1): sum",
"+ 3^3 + ... + 10^10 = 10405071317. Find the",
"... + 10^10 = 10405071317. Find the last ten digits",
"+ ... + 10^10 = 10405071317. Find the last ten",
"+ 2^2 + 3^3 + ... + 1000^1000. \"\"\" def",
"powers Solved by <NAME> The series, 1^1 + 2^2 +",
"= 1000 sum = 0 for i in range(1, max_digits+1):",
"of the series, 1^1 + 2^2 + 3^3 + ...",
"by <NAME> The series, 1^1 + 2^2 + 3^3 +",
"= 10405071317. Find the last ten digits of the series,",
"the last ten digits of the series, 1^1 + 2^2",
"last ten digits of the series, 1^1 + 2^2 +",
"Solved by <NAME> The series, 1^1 + 2^2 + 3^3",
"The series, 1^1 + 2^2 + 3^3 + ... +",
"for i in range(1, max_digits+1): sum += i**i print str(sum)[-10:]",
"\"\"\" Project Euler Problem 48 Self powers Solved by <NAME>",
"Find the last ten digits of the series, 1^1 +",
"1^1 + 2^2 + 3^3 + ... + 1000^1000. \"\"\"",
"digits of the series, 1^1 + 2^2 + 3^3 +",
"Project Euler Problem 48 Self powers Solved by <NAME> The",
"1^1 + 2^2 + 3^3 + ... + 10^10 =",
"max_digits = 1000 sum = 0 for i in range(1,",
"<NAME> The series, 1^1 + 2^2 + 3^3 + ...",
"3^3 + ... + 1000^1000. \"\"\" def main(): max_digits =",
"sum = 0 for i in range(1, max_digits+1): sum +=",
"2^2 + 3^3 + ... + 10^10 = 10405071317. Find",
"2^2 + 3^3 + ... + 1000^1000. \"\"\" def main():",
"10405071317. Find the last ten digits of the series, 1^1",
"\"\"\" def main(): max_digits = 1000 sum = 0 for",
"series, 1^1 + 2^2 + 3^3 + ... + 1000^1000.",
"10^10 = 10405071317. Find the last ten digits of the",
"ten digits of the series, 1^1 + 2^2 + 3^3",
"i in range(1, max_digits+1): sum += i**i print str(sum)[-10:] main()",
"+ 10^10 = 10405071317. Find the last ten digits of",
"= 0 for i in range(1, max_digits+1): sum += i**i",
"+ 1000^1000. \"\"\" def main(): max_digits = 1000 sum =",
"Euler Problem 48 Self powers Solved by <NAME> The series,",
"Self powers Solved by <NAME> The series, 1^1 + 2^2",
"the series, 1^1 + 2^2 + 3^3 + ... +",
"series, 1^1 + 2^2 + 3^3 + ... + 10^10",
"48 Self powers Solved by <NAME> The series, 1^1 +",
"+ 2^2 + 3^3 + ... + 10^10 = 10405071317.",
"3^3 + ... + 10^10 = 10405071317. Find the last",
"main(): max_digits = 1000 sum = 0 for i in"
] |
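The reconstructed solution is Python 2 (print statement) and sums the full integers before slicing off the last ten digits. Since only those digits are needed, a Python 3 rewrite can keep every term reduced modulo 10**10 using the three-argument pow. This is a hypothetical alternative sketch, not the original author's code:

def last_ten_digits_of_self_powers(limit=1000, mod=10**10):
    # pow(i, i, mod) computes i**i modulo mod without building huge integers.
    return sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod

# Zero-pad in case the ten-digit result happens to start with 0.
print(f"{last_ten_digits_of_self_powers():010d}")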
[
"@file: test.py @time: 2020-01-04 18:36:57 @description: \"\"\" import os from",
".config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate()",
"= logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i, lines with b: %i\"",
"logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i, lines with b: %i\" %",
"\\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max',",
"logData = spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines",
".config('spark.driver.memory', '512m') \\ .getOrCreate() return sc logFile = \"/root/spark/README.md\" spark",
"\"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context(): sc",
"2020-01-04 18:36:57 @description: \"\"\" import os from pyspark.sql import SparkSession",
"os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context(): sc =",
"os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON']",
"sc = SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\",
"\"/root/spark/README.md\" spark = create_spark_context() logData = spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count()",
".master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\",
"@time: 2020-01-04 18:36:57 @description: \"\"\" import os from pyspark.sql import",
"'/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\"",
"os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP']",
"coding: utf-8 -*- \"\"\" @author: rainsty @file: test.py @time: 2020-01-04",
"= logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i, lines",
"\"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context(): sc = SparkSession.builder \\",
"= 'rainsty' def create_spark_context(): sc = SparkSession.builder \\ .appName(\"TestSparkSession\") \\",
".config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m')",
"= SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory',",
"= '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] =",
"'512m') \\ .getOrCreate() return sc logFile = \"/root/spark/README.md\" spark =",
"return sc logFile = \"/root/spark/README.md\" spark = create_spark_context() logData =",
"numAs = logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i,",
"'rainsty' def create_spark_context(): sc = SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\")",
"'1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate() return sc logFile",
"utf-8 -*- \"\"\" @author: rainsty @file: test.py @time: 2020-01-04 18:36:57",
"'1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate() return sc logFile = \"/root/spark/README.md\"",
"#!/usr/bin/python # -*- coding: utf-8 -*- \"\"\" @author: rainsty @file:",
"= \"/root/spark/README.md\" spark = create_spark_context() logData = spark.read.text(logFile).cache() numAs =",
"SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\",
"# -*- coding: utf-8 -*- \"\"\" @author: rainsty @file: test.py",
"import SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] =",
"os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def",
"-*- \"\"\" @author: rainsty @file: test.py @time: 2020-01-04 18:36:57 @description:",
"\\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\",",
"'/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty'",
"\"\"\" import os from pyspark.sql import SparkSession os.environ['JAVA_HOME'] = '/root/jdk'",
".getOrCreate() return sc logFile = \"/root/spark/README.md\" spark = create_spark_context() logData",
"rainsty @file: test.py @time: 2020-01-04 18:36:57 @description: \"\"\" import os",
"spark = create_spark_context() logData = spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs",
"SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\"",
"print(\"Lines with a: %i, lines with b: %i\" % (numAs,",
"numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i, lines with b:",
"import os from pyspark.sql import SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME']",
"= '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME'] = \"/root/python\" os.environ['PYSPARK_PYTHON'] =",
".config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate() return sc",
"= create_spark_context() logData = spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs =",
"@author: rainsty @file: test.py @time: 2020-01-04 18:36:57 @description: \"\"\" import",
"os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context(): sc = SparkSession.builder \\ .appName(\"TestSparkSession\")",
"test.py @time: 2020-01-04 18:36:57 @description: \"\"\" import os from pyspark.sql",
".appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\",
"<filename>BigData/sparkTask/test.py #!/usr/bin/python # -*- coding: utf-8 -*- \"\"\" @author: rainsty",
"create_spark_context(): sc = SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\ .config('spark.executor.num',",
"\\ .getOrCreate() return sc logFile = \"/root/spark/README.md\" spark = create_spark_context()",
"@description: \"\"\" import os from pyspark.sql import SparkSession os.environ['JAVA_HOME'] =",
"with a: %i, lines with b: %i\" % (numAs, numBs))",
"a: %i, lines with b: %i\" % (numAs, numBs)) spark.stop()",
"logFile = \"/root/spark/README.md\" spark = create_spark_context() logData = spark.read.text(logFile).cache() numAs",
"create_spark_context() logData = spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count()",
"18:36:57 @description: \"\"\" import os from pyspark.sql import SparkSession os.environ['JAVA_HOME']",
"os from pyspark.sql import SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] =",
"'512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate() return",
"sc logFile = \"/root/spark/README.md\" spark = create_spark_context() logData = spark.read.text(logFile).cache()",
"pyspark.sql import SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark' os.environ['PYTHON_HOME']",
"from pyspark.sql import SparkSession os.environ['JAVA_HOME'] = '/root/jdk' os.environ['SPARK_HOME'] = '/root/spark'",
"logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with a: %i, lines with",
"\"\"\" @author: rainsty @file: test.py @time: 2020-01-04 18:36:57 @description: \"\"\"",
"'1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\",
"= \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context(): sc = SparkSession.builder",
"= \"/root/python\" os.environ['PYSPARK_PYTHON'] = \"/usr/bin/python\" os.environ['SPARK_MASTER_IP'] = 'rainsty' def create_spark_context():",
"def create_spark_context(): sc = SparkSession.builder \\ .appName(\"TestSparkSession\") \\ .master(\"spark://rainsty:7077\") \\",
".config('spark.cores.max', '1')\\ .config('spark.driver.memory', '512m') \\ .getOrCreate() return sc logFile =",
"\\ .config('spark.executor.num', '1')\\ .config('spark.executor.memory', '512m')\\ .config(\"spark.executor.cores\", '1')\\ .config('spark.cores.max', '1')\\ .config('spark.driver.memory',",
"= spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with",
"-*- coding: utf-8 -*- \"\"\" @author: rainsty @file: test.py @time:",
"spark.read.text(logFile).cache() numAs = logData.filter(logData.value.contains('a')).count() numBs = logData.filter(logData.value.contains('b')).count() print(\"Lines with a:"
] |
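Usage note (assumed, not part of the original script): a standalone script like this is normally launched against the same standalone master with spark-submit, for example spark-submit --master spark://rainsty:7077 test.py; the spark.* settings passed to SparkSession.builder could equally be supplied as --conf options on that command line.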
[
"il numero di intervalli in [0.0, 1.0] ') deltaIntervallo =",
"yIntervalli print \"area numpy = \", npArea print \"old timing",
"1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() # print xNPIntervalli",
"yNPIntervalli # print yIntervalli print \"area numpy = \", npArea",
"(xNPIntervalli - 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() #",
"di intervalli in [0.0, 1.0] ') deltaIntervallo = 1.0 /",
"numero di intervalli in [0.0, 1.0] ') deltaIntervallo = 1.0",
"0.0 for altezza in yIntervalli: areaSottesa += altezza * deltaIntervallo",
"deltaIntervallo start = timeit.default_timer() xIntervalli = [] yIntervalli = []",
"deltaIntervallo endOld = timeit.default_timer() print \"l'area sottesa dalla curva vale",
"yIntervalli = [] i = 0 while i < numIntervalli:",
"np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() # print xNPIntervalli # print xIntervalli",
"in yIntervalli: areaSottesa += altezza * deltaIntervallo endOld = timeit.default_timer()",
"* (x - 1.0) return y numIntervalli = input('inserire il",
"xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli *",
"altezza * deltaIntervallo endOld = timeit.default_timer() print \"l'area sottesa dalla",
"xIntervalli # print yNPIntervalli # print yIntervalli print \"area numpy",
"areaSottesa xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli",
"float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo start = timeit.default_timer() xIntervalli =",
"= -xNPIntervalli * (xNPIntervalli - 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP",
"print \"l'area sottesa dalla curva vale \", areaSottesa xNPIntervalli =",
"print xIntervalli # print yNPIntervalli # print yIntervalli print \"area",
"1.0] ') deltaIntervallo = 1.0 / float(numIntervalli) print \"larghezza intervallo\",",
"\", npArea print \"old timing = \", endOld - start,",
"print \"old timing = \", endOld - start, \"numPy timing",
"[] i = 0 while i < numIntervalli: xIntervallo =",
"import timeit def effe(x): y = -x * (x -",
"xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa =",
"return y numIntervalli = input('inserire il numero di intervalli in",
"# print yIntervalli print \"area numpy = \", npArea print",
"areaSottesa = 0.0 for altezza in yIntervalli: areaSottesa += altezza",
"= np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli * (xNPIntervalli",
"\"l'area sottesa dalla curva vale \", areaSottesa xNPIntervalli = np.linspace(0.0,",
"yIntervalli: areaSottesa += altezza * deltaIntervallo endOld = timeit.default_timer() print",
"timeit.default_timer() print \"l'area sottesa dalla curva vale \", areaSottesa xNPIntervalli",
"= timeit.default_timer() # print xNPIntervalli # print xIntervalli # print",
"import numpy as np import timeit def effe(x): y =",
"i < numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i +=",
"xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa = 0.0 for altezza",
"/ float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo start = timeit.default_timer() xIntervalli",
"0 while i < numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo))",
"# print xNPIntervalli # print xIntervalli # print yNPIntervalli #",
"* deltaIntervallo endOld = timeit.default_timer() print \"l'area sottesa dalla curva",
"= \", npArea print \"old timing = \", endOld -",
"endNP = timeit.default_timer() # print xNPIntervalli # print xIntervalli #",
"= -x * (x - 1.0) return y numIntervalli =",
"1.0 / float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo start = timeit.default_timer()",
"+= altezza * deltaIntervallo endOld = timeit.default_timer() print \"l'area sottesa",
"as np import timeit def effe(x): y = -x *",
"= input('inserire il numero di intervalli in [0.0, 1.0] ')",
"def effe(x): y = -x * (x - 1.0) return",
"1.0) return y numIntervalli = input('inserire il numero di intervalli",
"xIntervalli = [] yIntervalli = [] i = 0 while",
"= [] i = 0 while i < numIntervalli: xIntervallo",
"npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() # print xNPIntervalli #",
"timeit.default_timer() # print xNPIntervalli # print xIntervalli # print yNPIntervalli",
"= i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa = 0.0",
"print yNPIntervalli # print yIntervalli print \"area numpy = \",",
"= [] yIntervalli = [] i = 0 while i",
"-x * (x - 1.0) return y numIntervalli = input('inserire",
"curva vale \", areaSottesa xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False)",
"i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa = 0.0 for",
"endOld = timeit.default_timer() print \"l'area sottesa dalla curva vale \",",
"print xNPIntervalli # print xIntervalli # print yNPIntervalli # print",
"in [0.0, 1.0] ') deltaIntervallo = 1.0 / float(numIntervalli) print",
"< numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1",
"areaSottesa += altezza * deltaIntervallo endOld = timeit.default_timer() print \"l'area",
"dalla curva vale \", areaSottesa xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli,",
"\"larghezza intervallo\", deltaIntervallo start = timeit.default_timer() xIntervalli = [] yIntervalli",
"sottesa dalla curva vale \", areaSottesa xNPIntervalli = np.linspace(0.0, 1.0,",
"xNPIntervalli # print xIntervalli # print yNPIntervalli # print yIntervalli",
"np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli * (xNPIntervalli -",
"\"old timing = \", endOld - start, \"numPy timing =",
"start = timeit.default_timer() xIntervalli = [] yIntervalli = [] i",
"* (xNPIntervalli - 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer()",
"print \"area numpy = \", npArea print \"old timing =",
"= np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() # print xNPIntervalli # print",
"<reponame>gdv/python-alfabetizzazione<filename>esercizi/areaSottesaCompareNumPy.py import numpy as np import timeit def effe(x): y",
"i = 0 while i < numIntervalli: xIntervallo = i*deltaIntervallo",
"[0.0, 1.0] ') deltaIntervallo = 1.0 / float(numIntervalli) print \"larghezza",
"altezza in yIntervalli: areaSottesa += altezza * deltaIntervallo endOld =",
"-xNPIntervalli * (xNPIntervalli - 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP =",
"npArea print \"old timing = \", endOld - start, \"numPy",
"\", endOld - start, \"numPy timing = \", endNP -",
"[] yIntervalli = [] i = 0 while i <",
"# print xIntervalli # print yNPIntervalli # print yIntervalli print",
"= 1.0 / float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo start =",
"= \", endOld - start, \"numPy timing = \", endNP",
"intervallo\", deltaIntervallo start = timeit.default_timer() xIntervalli = [] yIntervalli =",
"(x - 1.0) return y numIntervalli = input('inserire il numero",
"# print yNPIntervalli # print yIntervalli print \"area numpy =",
"effe(x): y = -x * (x - 1.0) return y",
"numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0) npArea",
"= timeit.default_timer() xIntervalli = [] yIntervalli = [] i =",
"y = -x * (x - 1.0) return y numIntervalli",
"= 0.0 for altezza in yIntervalli: areaSottesa += altezza *",
"input('inserire il numero di intervalli in [0.0, 1.0] ') deltaIntervallo",
"endpoint=False) yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0) npArea =",
"y numIntervalli = input('inserire il numero di intervalli in [0.0,",
"while i < numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i",
"\"area numpy = \", npArea print \"old timing = \",",
"timeit def effe(x): y = -x * (x - 1.0)",
"vale \", areaSottesa xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli",
"deltaIntervallo = 1.0 / float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo start",
"yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo)",
"- 1.0) return y numIntervalli = input('inserire il numero di",
"timeit.default_timer() xIntervalli = [] yIntervalli = [] i = 0",
"intervalli in [0.0, 1.0] ') deltaIntervallo = 1.0 / float(numIntervalli)",
"1.0, numIntervalli, endpoint=False) yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0)",
"endOld - start, \"numPy timing = \", endNP - endOld",
"i += 1 areaSottesa = 0.0 for altezza in yIntervalli:",
"- 1.0) npArea = np.sum(yNPIntervalli*deltaIntervallo) endNP = timeit.default_timer() # print",
"numpy as np import timeit def effe(x): y = -x",
"numpy = \", npArea print \"old timing = \", endOld",
"= timeit.default_timer() print \"l'area sottesa dalla curva vale \", areaSottesa",
"1 areaSottesa = 0.0 for altezza in yIntervalli: areaSottesa +=",
"') deltaIntervallo = 1.0 / float(numIntervalli) print \"larghezza intervallo\", deltaIntervallo",
"= 0 while i < numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo)",
"yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa = 0.0 for altezza in",
"\", areaSottesa xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False) yNPIntervalli =",
"for altezza in yIntervalli: areaSottesa += altezza * deltaIntervallo endOld",
"print \"larghezza intervallo\", deltaIntervallo start = timeit.default_timer() xIntervalli = []",
"print yIntervalli print \"area numpy = \", npArea print \"old",
"timing = \", endOld - start, \"numPy timing = \",",
"numIntervalli: xIntervallo = i*deltaIntervallo xIntervalli.append(xIntervallo) yIntervalli.append(effe(xIntervallo)) i += 1 areaSottesa",
"+= 1 areaSottesa = 0.0 for altezza in yIntervalli: areaSottesa",
"numIntervalli = input('inserire il numero di intervalli in [0.0, 1.0]",
"np import timeit def effe(x): y = -x * (x"
] |
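Both the plain loop and the NumPy path above compute a left Riemann sum of -x(x - 1) over [0, 1], whose exact integral is 1/6, so the result has a simple correctness check. The following is a small Python 3 sketch added here for illustration, not part of the original exercise:

import numpy as np

n = 1_000_000
dx = 1.0 / n
x = np.linspace(0.0, 1.0, n, endpoint=False)   # left endpoints of the n subintervals
area = np.sum(-x * (x - 1.0) * dx)             # left Riemann sum
print(area, abs(area - 1.0 / 6.0))             # the error shrinks as n grows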
[
"the type marshalling \"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[],",
"for keyword in node_assign.value.keywords if keyword.arg == \"default\" ][0] value.args.append(default_value)",
"ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) fn_body",
"property -> self.prop = parent_dict[\"prop\"] Optional property -> self.prop =",
"is primitive, we can simply get it Marshmallow will do",
"constructor class_body = [ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None",
"value @staticmethod def _get_default_for_property(node_assign, value, object_, prop): for node in",
"else: # Assign the property as self.prop = table.get(\"prop\") value",
") ], ) @staticmethod def _get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_),",
"ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: # Assign the property as self.prop",
"func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def _hint_required_property(node_assign, value, object_,",
"decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[],",
"return value else: return value @staticmethod def assign_property(node_assign, object_): \"\"\"",
"\"Nested\": return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\":",
"node_assign.value.func.attr == \"List\": return ( len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr",
"can simply get it Marshmallow will do the type marshalling",
"for node in ast.walk(node_assign): if isinstance(node, ast.keyword): if \"required\" in",
"ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body",
"iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[], is_async=0,",
"decorator_list=[], returns=None ) return _construct_to_helper @staticmethod def construct_from_json(schema): fn_args =",
"or dict supported\") def _construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)],",
"return value for node in ast.walk(node_assign): if isinstance(node, ast.keyword) and",
"dict supported\") def _construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None,",
"args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def _hint_required_property(node_assign, value, object_, prop): for",
") ], ), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", )",
"keywords=[], ) @staticmethod def _hint_required_property(node_assign, value, object_, prop): for node",
"decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output): if output == \"json\": method",
"= ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[],",
"prop) value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"),",
"args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) fn_body =",
"kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body = [ ObjectGenerator.assign_property(node, name_lower)",
"attr=\"data\", ) ) ] return ast.FunctionDef( name=\"from_json\", args=fn_args, body=fn_body, decorator_list=[ast.Name(id=\"staticmethod\")],",
"if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: # Assign",
"node in ast.walk(node_assign): if isinstance(node, ast.Call): if node.func.attr == \"Nested\":",
"ast from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name class ObjectGenerator(object): @staticmethod def",
"if isinstance(node, ast.keyword) and node.arg == \"required\": return value for",
"value.args.append(default_value) return value else: return value @staticmethod def assign_property(node_assign, object_):",
"name.lower() # Bundle function arguments and keywords fn_arguments = ast.arguments(",
"kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body = [ ObjectGenerator.assign_property(node, name_lower) for",
"> 0 and node_assign.value.args[0].func.attr == \"Nested\" ) else: return False",
"annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body =",
"@staticmethod def _nesting_class(node_assign): for node in ast.walk(node_assign): if isinstance(node, ast.Call):",
"value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop),",
"parent_dict.get(\"prop\") Primative nested list -> self.prop = parent_dict.get(\"prop\") Non-primative nested",
"ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)",
"node in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"default\":",
"][0] value.args.append(default_value) return value else: return value @staticmethod def assign_property(node_assign,",
"args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ], ), attr=method, ),",
"for node in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg ==",
") fn_body = [ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name),",
"_nesting_class(node_assign): for node in ast.walk(node_assign): if isinstance(node, ast.Call): if node.func.attr",
"\"json\": method = \"dumps\" elif output == \"dict\": method =",
"@staticmethod def _hint_required_property(node_assign, value, object_, prop): for node in ast.walk(node_assign):",
"@staticmethod def _construct_to_(output): if output == \"json\": method = \"dumps\"",
"keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")],",
"for el in parent_dict.get('props', {})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign) if",
"ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call(",
"( len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == \"Nested\" ) else:",
"len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == \"Nested\" ) else: return",
"kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) fn_body = [ ast.Return( value=ast.Attribute(",
"value else: return value @staticmethod def assign_property(node_assign, object_): \"\"\" Required",
"keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[],",
"ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name = node_assign.targets[0] return name.id @staticmethod",
"ast.keyword) and node.arg == \"default\": default_value = [ keyword.value for",
"self.prop = table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop) # If the",
"] # pass if no Assign nodes if len(fn_body) ==",
"-> self.prop = parent_dict.get(\"prop\") Non-primative nested list -> self.props =",
"vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body = [ ObjectGenerator.assign_property(node,",
"value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod def _get_default_for_property(node_assign, value, object_,",
"vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) fn_body = [ ast.Return(",
"attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[], is_async=0, ) ],",
"value, object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type,",
"def _get_property_name(node_assign): name = node_assign.targets[0] return name.id @staticmethod def _nesting_class(node_assign):",
"in a list comp If the nest is primitive, we",
"attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema): name =",
"@staticmethod def construct_from_json(schema): fn_args = ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\",",
"keywords fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ],",
"def _nesting_class(node_assign): for node in ast.walk(node_assign): if isinstance(node, ast.Call): if",
"[ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\",",
"func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[], is_async=0, )",
"value, object_, prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword):",
"schema.body if isinstance(node, ast.Assign) ] # pass if no Assign",
"args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef( name=\"from_json\",",
"is_async=0, ) ], ) @staticmethod def _get_key_from_object(object_, prop): return ast.Call(",
"class_body = [ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None ),",
"list -> self.prop = parent_dict.get(\"prop\") Non-primative nested list -> self.props",
"value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ], ),",
"ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[],",
"ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ),",
"] return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output):",
"fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[],",
"== \"Nested\" ) else: return False @staticmethod def _init_non_primitive_nested_class(node_assign, object_,",
"nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props',",
") ) ] return ast.FunctionDef( name=\"from_json\", args=fn_args, body=fn_body, decorator_list=[ast.Name(id=\"staticmethod\")], returns=None,",
"node.arg == \"required\": return value for node in ast.walk(node_assign): if",
"= ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else:",
"args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[],",
"ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: #",
"annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], )",
"object_, prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword) and",
"# pass if no Assign nodes if len(fn_body) == 0:",
"Assign the property as self.prop = table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_,",
"ObjectGenerator.assign_property(node, name_lower) for node in schema.body if isinstance(node, ast.Assign) ]",
"prop): \"\"\" If the nested list is non-primitive, initialise sub-classes",
"@staticmethod def assign_property(node_assign, object_): \"\"\" Required property -> self.prop =",
"output == \"json\": method = \"dumps\" elif output == \"dict\":",
"isinstance(node, ast.keyword): if \"required\" in node.arg: value = ast.Subscript( value=ast.Name(id=object_),",
"@staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If the nested list",
"= \"dump\" else: raise NotImplementedError(\"Only deserialisation to json or dict",
"ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[],",
"\"dict\": method = \"dump\" else: raise NotImplementedError(\"Only deserialisation to json",
"as self.prop = table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop) # If",
"ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output): if output",
"we can simply get it Marshmallow will do the type",
"arguments and keywords fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower,",
"return name.id @staticmethod def _nesting_class(node_assign): for node in ast.walk(node_assign): if",
"def construct_from_json(schema): fn_args = ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None),",
") ] return ast.FunctionDef( name=\"from_json\", args=fn_args, body=fn_body, decorator_list=[ast.Name(id=\"staticmethod\")], returns=None, )",
"= [PropertyClass(el) for el in parent_dict.get('props', {})] \"\"\" prop =",
"ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema),",
"do the type marshalling \"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")],",
"== \"dict\": method = \"dump\" else: raise NotImplementedError(\"Only deserialisation to",
"return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None ) return _construct_to_helper",
"name = node_assign.targets[0] return name.id @staticmethod def _nesting_class(node_assign): for node",
"= ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value = ObjectGenerator._get_default_for_property(node_assign, value, object_,",
") return _construct_to_helper @staticmethod def construct_from_json(schema): fn_args = ast.arguments( args=[",
"body=class_body, decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output): if output == \"json\":",
"class_name(schema.name) name_lower = name.lower() # Bundle function arguments and keywords",
"value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True)",
"will do the type marshalling \"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),",
"[PropertyClass(el) for el in parent_dict.get('props', {})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign)",
"isinstance(node, ast.keyword) and node.arg == \"default\": default_value = [ keyword.value",
"_hint_required_property(node_assign, value, object_, prop): for node in ast.walk(node_assign): if isinstance(node,",
") fn_body = [ ObjectGenerator.assign_property(node, name_lower) for node in schema.body",
"_get_property_name(node_assign): name = node_assign.targets[0] return name.id @staticmethod def _nesting_class(node_assign): for",
"property -> self.prop = parent_dict.get(\"prop\") Primative nested list -> self.prop",
"object_, prop) else: # Assign the property as self.prop =",
"ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema):",
"arg=\"strict\", value=ast.NameConstant(value=True) ) ], ), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ),",
"{})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign,",
"nest is primitive, we can simply get it Marshmallow will",
"), ifs=[], is_async=0, ) ], ) @staticmethod def _get_key_from_object(object_, prop):",
"0: fn_body = [ast.Pass()] # Generate class constructor class_body =",
"args=fn_args, body=fn_body, decorator_list=[], returns=None ) return _construct_to_helper @staticmethod def construct_from_json(schema):",
"target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[],",
"ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)),",
"def _get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], )",
"Marshmallow will do the type marshalling \"\"\" return ast.ListComp( elt=ast.Call(",
"return value @staticmethod def _get_default_for_property(node_assign, value, object_, prop): for node",
"def _construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[],",
"prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def",
"== \"json\": method = \"dumps\" elif output == \"dict\": method",
"@staticmethod def construct_class(schema): name = class_name(schema.name) name_lower = name.lower() #",
"required, assign as self.prop = table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value,",
"simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema): name = class_name(schema.name) name_lower",
"_non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\": return ( len(node_assign.value.args) > 0",
"assign_property(node_assign, object_): \"\"\" Required property -> self.prop = parent_dict[\"prop\"] Optional",
"[ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(",
"value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")),",
") @staticmethod def _hint_required_property(node_assign, value, object_, prop): for node in",
"non-primitive, initialise sub-classes in a list comp If the nest",
"initialise sub-classes in a list comp If the nest is",
"in schema.body if isinstance(node, ast.Assign) ] # pass if no",
"return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def _hint_required_property(node_assign,",
"else: return value @staticmethod def assign_property(node_assign, object_): \"\"\" Required property",
"prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod",
"def _hint_required_property(node_assign, value, object_, prop): for node in ast.walk(node_assign): if",
") else: return False @staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\"",
"if node_assign.value.func.attr == \"List\": return ( len(node_assign.value.args) > 0 and",
"return _construct_to_helper @staticmethod def construct_from_json(schema): fn_args = ast.arguments( args=[ ast.arg(arg=\"json\",",
"If the nest is primitive, we can simply get it",
"in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"required\": return",
"Non-primative nested list -> self.props = [PropertyClass(el) for el in",
"name = class_name(schema.name) name_lower = name.lower() # Bundle function arguments",
"defaults=[], ) fn_body = [ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call(",
"node_assign.value.keywords if keyword.arg == \"default\" ][0] value.args.append(default_value) return value else:",
"@staticmethod def _get_default_for_property(node_assign, value, object_, prop): for node in ast.walk(node_assign):",
"table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop) # If the property is",
"returns=None ) return _construct_to_helper @staticmethod def construct_from_json(schema): fn_args = ast.arguments(",
"target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema): name",
"ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0,",
"ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[], is_async=0, ) ], ) @staticmethod",
"args=fn_arguments, body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return",
"\"required\" in node.arg: value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return",
"-> self.prop = parent_dict[\"prop\"] Optional property -> self.prop = parent_dict.get(\"prop\")",
"func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\",",
"= [ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[",
"ast.walk(node_assign): if isinstance(node, ast.Call): if node.func.attr == \"Nested\": return class_name(node.args[0].id)",
"\"default\": default_value = [ keyword.value for keyword in node_assign.value.keywords if",
"\"\"\" prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_,",
"comp If the nest is primitive, we can simply get",
"Optional property -> self.prop = parent_dict.get(\"prop\") Primative nested list ->",
"ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)],",
"import Annotations, class_name class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name =",
") return value @staticmethod def _get_default_for_property(node_assign, value, object_, prop): for",
"len(fn_body) == 0: fn_body = [ast.Pass()] # Generate class constructor",
"and node.arg == \"required\": return value for node in ast.walk(node_assign):",
"value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value = ObjectGenerator._get_default_for_property(node_assign, value,",
"in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"default\": default_value",
"parent_dict.get('props', {})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value =",
"kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body = [ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute(",
"= ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value,",
"deserialisation to json or dict supported\") def _construct_to_helper(schema): fn_args =",
"node in schema.body if isinstance(node, ast.Assign) ] # pass if",
"import ast from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name class ObjectGenerator(object): @staticmethod",
"value, object_, prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword)",
"el in parent_dict.get('props', {})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign):",
"self.prop = parent_dict.get(\"prop\") Non-primative nested list -> self.props = [PropertyClass(el)",
"a list comp If the nest is primitive, we can",
"default_value = [ keyword.value for keyword in node_assign.value.keywords if keyword.arg",
"-> self.prop = parent_dict.get(\"prop\") Primative nested list -> self.prop =",
"if isinstance(node, ast.keyword): if \"required\" in node.arg: value = ast.Subscript(",
"the property is required, assign as self.prop = table[\"prop\"] value",
"supported\") def _construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[],",
"object_): \"\"\" Required property -> self.prop = parent_dict[\"prop\"] Optional property",
"in node.arg: value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value",
"0 and node_assign.value.args[0].func.attr == \"Nested\" ) else: return False @staticmethod",
"list comp If the nest is primitive, we can simply",
"list is non-primitive, initialise sub-classes in a list comp If",
"in ast.walk(node_assign): if isinstance(node, ast.keyword): if \"required\" in node.arg: value",
"def _construct_to_(output): if output == \"json\": method = \"dumps\" elif",
"args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef( name=f\"to_{output}\",",
"def assign_property(node_assign, object_): \"\"\" Required property -> self.prop = parent_dict[\"prop\"]",
"slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod def _get_default_for_property(node_assign, value, object_, prop):",
"fn_body = [ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[],",
"@staticmethod def _get_property_name(node_assign): name = node_assign.targets[0] return name.id @staticmethod def",
"== \"List\": return ( len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr ==",
"name_lower = name.lower() # Bundle function arguments and keywords fn_arguments",
"the nested list is non-primitive, initialise sub-classes in a list",
"def construct_class(schema): name = class_name(schema.name) name_lower = name.lower() # Bundle",
"ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\",",
"is non-primitive, initialise sub-classes in a list comp If the",
"# Assign the property as self.prop = table.get(\"prop\") value =",
"output == \"dict\": method = \"dump\" else: raise NotImplementedError(\"Only deserialisation",
"fn_body = [ast.Pass()] # Generate class constructor class_body = [",
"ast.keyword): if \"required\" in node.arg: value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))",
"json or dict supported\") def _construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\",",
"class_name class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name = node_assign.targets[0] return",
"= table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value =",
"value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema): name = class_name(schema.name)",
"_init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If the nested list is non-primitive,",
"and node.arg == \"default\": default_value = [ keyword.value for keyword",
"fn_args = ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None,",
"is required, assign as self.prop = table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign,",
"), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],",
"parent_dict.get(\"prop\") Non-primative nested list -> self.props = [PropertyClass(el) for el",
"property as self.prop = table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop) #",
"ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod",
"return False @staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If the",
"body=fn_body, decorator_list=[], returns=None ) return _construct_to_helper @staticmethod def construct_from_json(schema): fn_args",
"pass if no Assign nodes if len(fn_body) == 0: fn_body",
"in ast.walk(node_assign): if isinstance(node, ast.Call): if node.func.attr == \"Nested\": return",
"_construct_to_(output): if output == \"json\": method = \"dumps\" elif output",
"as self.prop = table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)",
"if no Assign nodes if len(fn_body) == 0: fn_body =",
"fn_body = [ ObjectGenerator.assign_property(node, name_lower) for node in schema.body if",
"[ keyword.value for keyword in node_assign.value.keywords if keyword.arg == \"default\"",
"], ) @staticmethod def _get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"),",
"@staticmethod def _get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[],",
"= [ ObjectGenerator.assign_property(node, name_lower) for node in schema.body if isinstance(node,",
"keyword in node_assign.value.keywords if keyword.arg == \"default\" ][0] value.args.append(default_value) return",
"name_lower) for node in schema.body if isinstance(node, ast.Assign) ] #",
"], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", ) )",
"keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ], ), attr=method, ), args=[ast.Name(id=\"self\")],",
"ObjectGenerator._get_key_from_object(object_, prop) # If the property is required, assign as",
"node.arg: value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod",
"func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ],",
"NotImplementedError(\"Only deserialisation to json or dict supported\") def _construct_to_helper(schema): fn_args",
"if len(fn_body) == 0: fn_body = [ast.Pass()] # Generate class",
"keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args,",
"if isinstance(node, ast.keyword) and node.arg == \"default\": default_value = [",
"defaults=[ast.NameConstant(value=None)], ) fn_body = [ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call(",
"self.prop = table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value",
") ] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None )",
"= node_assign.targets[0] return name.id @staticmethod def _nesting_class(node_assign): for node in",
"it Marshmallow will do the type marshalling \"\"\" return ast.ListComp(",
"= [ keyword.value for keyword in node_assign.value.keywords if keyword.arg ==",
"ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[],",
"\"Nested\" ) else: return False @staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop):",
"value = ObjectGenerator._get_key_from_object(object_, prop) # If the property is required,",
"from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign):",
"\"default\" ][0] value.args.append(default_value) return value else: return value @staticmethod def",
"table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop) value = ObjectGenerator._get_default_for_property(node_assign,",
"Bundle function arguments and keywords fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\",",
"isinstance(node, ast.keyword) and node.arg == \"required\": return value for node",
"ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: # Assign the",
"= ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None,",
"the property as self.prop = table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop)",
"return ( len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == \"Nested\" )",
"prop) else: # Assign the property as self.prop = table.get(\"prop\")",
"_construct_to_helper(schema): fn_args = ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None,",
"kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body = [ ast.Return( ast.Attribute(",
"ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\",",
"== \"default\": default_value = [ keyword.value for keyword in node_assign.value.keywords",
"attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def _hint_required_property(node_assign, value, object_, prop):",
"= name.lower() # Bundle function arguments and keywords fn_arguments =",
"= parent_dict.get(\"prop\") Primative nested list -> self.prop = parent_dict.get(\"prop\") Non-primative",
"assign as self.prop = table[\"prop\"] value = ObjectGenerator._hint_required_property(node_assign, value, object_,",
"] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None ) return",
"sub-classes in a list comp If the nest is primitive,",
"json_codegen.generators.python3_marshmallow.utils import Annotations, class_name class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name",
"nodes if len(fn_body) == 0: fn_body = [ast.Pass()] # Generate",
"values=[])], keywords=[], ), ifs=[], is_async=0, ) ], ) @staticmethod def",
"def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\": return ( len(node_assign.value.args) >",
"], ), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", ) )",
"Generate class constructor class_body = [ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body,",
"_get_default_for_property(node_assign, value, object_, prop): for node in ast.walk(node_assign): if isinstance(node,",
"property is required, assign as self.prop = table[\"prop\"] value =",
"vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body = [ ast.Return(",
"in parent_dict.get('props', {})] \"\"\" prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value",
"= ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: # Assign the property as",
"list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]",
"keywords=[], ), ifs=[], is_async=0, ) ], ) @staticmethod def _get_key_from_object(object_,",
"prop) # If the property is required, assign as self.prop",
"), attr=\"data\", ) ) ] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body,",
"If the nested list is non-primitive, initialise sub-classes in a",
"_get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod",
"and keywords fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")),",
"def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If the nested list is",
"in node_assign.value.keywords if keyword.arg == \"default\" ][0] value.args.append(default_value) return value",
"marshalling \"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[",
"object_, prop) return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, )",
"], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body = [",
"fn_body = [ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[],",
"= ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod def _get_default_for_property(node_assign,",
"\"\"\" If the nested list is non-primitive, initialise sub-classes in",
"method = \"dumps\" elif output == \"dict\": method = \"dump\"",
"), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", ) ) ]",
"# Generate class constructor class_body = [ ast.FunctionDef( name=\"__init__\", args=fn_arguments,",
"func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"),",
"Required property -> self.prop = parent_dict[\"prop\"] Optional property -> self.prop",
"ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None ) return _construct_to_helper @staticmethod",
"-> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})] \"\"\"",
"ifs=[], is_async=0, ) ], ) @staticmethod def _get_key_from_object(object_, prop): return",
"returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[], body=class_body,",
"), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef(",
"function arguments and keywords fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None),",
"keyword.arg == \"default\" ][0] value.args.append(default_value) return value else: return value",
") @staticmethod def _get_key_from_object(object_, prop): return ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)],",
"= table.get(\"prop\") value = ObjectGenerator._get_key_from_object(object_, prop) # If the property",
"return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output): if",
"kw_defaults=[], kwarg=None, defaults=[], ) fn_body = [ ast.Return( value=ast.Attribute( value=ast.Call(",
"= parent_dict[\"prop\"] Optional property -> self.prop = parent_dict.get(\"prop\") Primative nested",
"= [ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[",
"return value @staticmethod def assign_property(node_assign, object_): \"\"\" Required property ->",
"ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod def",
"value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod def",
"if output == \"json\": method = \"dumps\" elif output ==",
"], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body = [",
"ast.keyword) and node.arg == \"required\": return value for node in",
"== \"default\" ][0] value.args.append(default_value) return value else: return value @staticmethod",
"construct_from_json(schema): fn_args = ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ],",
"ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) ) return value @staticmethod def _get_default_for_property(node_assign, value,",
"value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ),",
"elif output == \"dict\": method = \"dump\" else: raise NotImplementedError(\"Only",
"args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[], ), ifs=[], is_async=0, ) ], )",
"= \"dumps\" elif output == \"dict\": method = \"dump\" else:",
"nested list -> self.prop = parent_dict.get(\"prop\") Non-primative nested list ->",
"value @staticmethod def assign_property(node_assign, object_): \"\"\" Required property -> self.prop",
"= parent_dict.get(\"prop\") Non-primative nested list -> self.props = [PropertyClass(el) for",
"\"List\": return ( len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == \"Nested\"",
"[ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema),",
"args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ),",
"self.prop = parent_dict.get(\"prop\") Primative nested list -> self.prop = parent_dict.get(\"prop\")",
"construct_class(schema): name = class_name(schema.name) name_lower = name.lower() # Bundle function",
"else: raise NotImplementedError(\"Only deserialisation to json or dict supported\") def",
"ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ], ), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[],",
"isinstance(node, ast.Assign) ] # pass if no Assign nodes if",
"class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name = node_assign.targets[0] return name.id",
"ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\",",
"= [ast.Pass()] # Generate class constructor class_body = [ ast.FunctionDef(",
"bases=[], body=class_body, decorator_list=[], keywords=[]) @staticmethod def _construct_to_(output): if output ==",
") ) ] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None",
"# If the property is required, assign as self.prop =",
"type marshalling \"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ),",
"\"dumps\" elif output == \"dict\": method = \"dump\" else: raise",
"= ast.arguments( args=[ast.arg(arg=\"self\", annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], )",
"if node.func.attr == \"Nested\": return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if",
"return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"),",
"ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body",
"keyword.value for keyword in node_assign.value.keywords if keyword.arg == \"default\" ][0]",
"kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], ) fn_body = [ ast.Return( ast.Attribute( value=ast.Call(",
"args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop),",
"isinstance(node, ast.Call): if node.func.attr == \"Nested\": return class_name(node.args[0].id) @staticmethod def",
"@staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\": return ( len(node_assign.value.args)",
"class constructor class_body = [ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[],",
"# Bundle function arguments and keywords fn_arguments = ast.arguments( args=[",
"defaults=[], ) fn_body = [ ObjectGenerator.assign_property(node, name_lower) for node in",
"fn_arguments = ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None,",
"nested list is non-primitive, initialise sub-classes in a list comp",
"\"\"\" return ast.ListComp( elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension(",
"prop = ObjectGenerator._get_property_name(node_assign) if ObjectGenerator._non_primitive_nested_list(node_assign): value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)",
"node.arg == \"default\": default_value = [ keyword.value for keyword in",
"ast.arguments( args=[ ast.arg(arg=\"self\", annotation=None), ast.arg(arg=name_lower, annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[],",
"self.props = [PropertyClass(el) for el in parent_dict.get('props', {})] \"\"\" prop",
"body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name,",
"return ast.AnnAssign( target=ast.Attribute(value=ast.Name(id=\"self\"), attr=prop), value=value, simple=0, annotation=Annotations(node_assign).type, ) @staticmethod def",
"kw_defaults=[], defaults=[], ) fn_body = [ ObjectGenerator.assign_property(node, name_lower) for node",
"keywords=[]) @staticmethod def _construct_to_(output): if output == \"json\": method =",
"\"\"\" Required property -> self.prop = parent_dict[\"prop\"] Optional property ->",
"no Assign nodes if len(fn_body) == 0: fn_body = [ast.Pass()]",
"= class_name(schema.name) name_lower = name.lower() # Bundle function arguments and",
"value=ast.NameConstant(value=True) ) ], ), attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\",",
"[ast.Pass()] # Generate class constructor class_body = [ ast.FunctionDef( name=\"__init__\",",
"name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[], returns=None ) return _construct_to_helper @staticmethod def",
"Annotations, class_name class ObjectGenerator(object): @staticmethod def _get_property_name(node_assign): name = node_assign.targets[0]",
"node in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"required\":",
"if isinstance(node, ast.Call): if node.func.attr == \"Nested\": return class_name(node.args[0].id) @staticmethod",
"\"required\": return value for node in ast.walk(node_assign): if isinstance(node, ast.keyword)",
"Assign nodes if len(fn_body) == 0: fn_body = [ast.Pass()] #",
"ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])",
"parent_dict[\"prop\"] Optional property -> self.prop = parent_dict.get(\"prop\") Primative nested list",
"annotation=ast.Name(id=\"dict\")), ], vararg=None, kwarg=None, kwonlyargs=[], kw_defaults=[], defaults=[], ) fn_body =",
"ast.Call): if node.func.attr == \"Nested\": return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign):",
"the nest is primitive, we can simply get it Marshmallow",
"if \"required\" in node.arg: value = ast.Subscript( value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop)) )",
") fn_body = [ ast.Return( ast.Attribute( value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name),",
"[ ObjectGenerator.assign_property(node, name_lower) for node in schema.body if isinstance(node, ast.Assign)",
"get it Marshmallow will do the type marshalling \"\"\" return",
") @staticmethod def construct_class(schema): name = class_name(schema.name) name_lower = name.lower()",
"== 0: fn_body = [ast.Pass()] # Generate class constructor class_body",
"primitive, we can simply get it Marshmallow will do the",
"func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) ) ], ), attr=method,",
"prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg",
"for node in ast.walk(node_assign): if isinstance(node, ast.Call): if node.func.attr ==",
"== \"required\": return value for node in ast.walk(node_assign): if isinstance(node,",
"elt=ast.Call( func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)), args=[ast.Name(id=\"el\")], keywords=[], ), generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_),",
"name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ]",
"kwarg=None, defaults=[], ) fn_body = [ ast.Return( value=ast.Attribute( value=ast.Call( func=ast.Attribute(",
"Primative nested list -> self.prop = parent_dict.get(\"prop\") Non-primative nested list",
"ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"default\": default_value =",
"method = \"dump\" else: raise NotImplementedError(\"Only deserialisation to json or",
"name.id @staticmethod def _nesting_class(node_assign): for node in ast.walk(node_assign): if isinstance(node,",
"value, object_, prop) value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return",
"node.func.attr == \"Nested\": return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr",
"return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\": return",
"If the property is required, assign as self.prop = table[\"prop\"]",
"), attr=\"data\", ) ) ] return ast.FunctionDef( name=\"from_json\", args=fn_args, body=fn_body,",
"\"dump\" else: raise NotImplementedError(\"Only deserialisation to json or dict supported\")",
"node_assign.value.args[0].func.attr == \"Nested\" ) else: return False @staticmethod def _init_non_primitive_nested_class(node_assign,",
"ast.Assign) ] # pass if no Assign nodes if len(fn_body)",
"if isinstance(node, ast.Assign) ] # pass if no Assign nodes",
"self.prop = parent_dict[\"prop\"] Optional property -> self.prop = parent_dict.get(\"prop\") Primative",
"False @staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If the nested",
"annotation=Annotations(node_assign).type, ) @staticmethod def construct_class(schema): name = class_name(schema.name) name_lower =",
"ast.walk(node_assign): if isinstance(node, ast.keyword): if \"required\" in node.arg: value =",
"attr=\"data\", ) ) ] return ast.FunctionDef( name=f\"to_{output}\", args=fn_args, body=fn_body, decorator_list=[],",
"prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword): if \"required\"",
"value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop) else: # Assign the property",
"annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[ast.NameConstant(value=None)], )",
"node in ast.walk(node_assign): if isinstance(node, ast.keyword): if \"required\" in node.arg:",
"_construct_to_helper @staticmethod def construct_from_json(schema): fn_args = ast.arguments( args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")),",
"func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword(arg=\"strict\", value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ],",
"annotation=None)], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[], ) fn_body = [",
"ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg == \"required\": return value",
"class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr == \"List\": return (",
"def _get_default_for_property(node_assign, value, object_, prop): for node in ast.walk(node_assign): if",
"raise NotImplementedError(\"Only deserialisation to json or dict supported\") def _construct_to_helper(schema):",
"else: return False @staticmethod def _init_non_primitive_nested_class(node_assign, object_, prop): \"\"\" If",
"), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef(",
"value=ast.Call( func=ast.Attribute( value=ast.Call( func=ast.Name(id=schema.name), args=[], keywords=[ ast.keyword( arg=\"strict\", value=ast.NameConstant(value=True) )",
"generators=[ ast.comprehension( target=ast.Name(id=\"el\"), iter=ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])], keywords=[],",
"value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", )",
"object_, prop): \"\"\" If the nested list is non-primitive, initialise",
"attr=method, ), args=[ast.Name(id=\"self\")], keywords=[], ), attr=\"data\", ) ) ] return",
"for node in schema.body if isinstance(node, ast.Assign) ] # pass",
"args=[ ast.arg(arg=\"json\", annotation=ast.Name(id=\"str\")), ast.arg(arg=\"only\", annotation=None), ], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None,",
"value=ast.NameConstant(value=True)), ast.keyword(arg=\"only\", value=ast.Name(id=\"only\")), ], ), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ),",
"value for node in ast.walk(node_assign): if isinstance(node, ast.keyword) and node.arg",
"= ObjectGenerator._get_key_from_object(object_, prop) # If the property is required, assign",
"object_, prop): for node in ast.walk(node_assign): if isinstance(node, ast.keyword): if",
"if keyword.arg == \"default\" ][0] value.args.append(default_value) return value else: return",
"), ObjectGenerator._construct_to_(\"json\")(schema), ObjectGenerator._construct_to_(\"dict\")(schema), ObjectGenerator.construct_from_json(schema), ] return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[],",
"and node_assign.value.args[0].func.attr == \"Nested\" ) else: return False @staticmethod def",
"ast.Call( func=ast.Attribute(value=ast.Name(id=object_), attr=\"get\"), args=[ast.Str(s=prop)], keywords=[], ) @staticmethod def _hint_required_property(node_assign, value,",
"), attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", ) ) ]",
"keywords=[], ), attr=\"data\", ) ) ] return ast.FunctionDef( name=\"from_json\", args=fn_args,",
"== \"Nested\": return class_name(node.args[0].id) @staticmethod def _non_primitive_nested_list(node_assign): if node_assign.value.func.attr ==",
"= [ ast.FunctionDef( name=\"__init__\", args=fn_arguments, body=fn_body, decorator_list=[], returns=None ), ObjectGenerator._construct_to_(\"json\")(schema),",
"attr=\"loads\", ), args=[ast.Name(id=\"json\")], keywords=[], ), attr=\"data\", ) ) ] return",
"to json or dict supported\") def _construct_to_helper(schema): fn_args = ast.arguments(",
"simply get it Marshmallow will do the type marshalling \"\"\"",
"node_assign.targets[0] return name.id @staticmethod def _nesting_class(node_assign): for node in ast.walk(node_assign):",
"object_, prop) value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop) return ast.AnnAssign("
] |
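# ----------------------------------------------------------------------------
# Editor's sketch (not part of json_codegen): ObjectGenerator above only
# builds ast nodes. To show the technique end to end, the snippet below
# hand-assembles a tiny class ("Generated" and its "data" argument are made
# up for illustration), fills in source locations, and executes it. The
# required ast fields shift across versions (posonlyargs from 3.8,
# type_params in 3.12), so this assumes roughly CPython 3.8-3.11.
# ----------------------------------------------------------------------------
import ast

init_fn = ast.FunctionDef(
    name="__init__",
    args=ast.arguments(
        posonlyargs=[],
        args=[ast.arg(arg="self"), ast.arg(arg="data")],
        vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[],
    ),
    body=[
        # equivalent to: self.value = data.get("value")
        ast.Assign(
            targets=[
                ast.Attribute(value=ast.Name(id="self", ctx=ast.Load()),
                              attr="value", ctx=ast.Store())
            ],
            value=ast.Call(
                func=ast.Attribute(value=ast.Name(id="data", ctx=ast.Load()),
                                   attr="get", ctx=ast.Load()),
                args=[ast.Constant(value="value")],
                keywords=[],
            ),
        )
    ],
    decorator_list=[],
    returns=None,
)
cls_node = ast.ClassDef(
    name="Generated", bases=[], keywords=[], body=[init_fn], decorator_list=[]
)
module = ast.fix_missing_locations(ast.Module(body=[cls_node], type_ignores=[]))

namespace = {}
exec(compile(module, "<generated>", "exec"), namespace)
print(namespace["Generated"]({"value": 42}).value)  # -> 42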
#
# $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $
#===============================================================================
#                 ESMP/examples/ESMP_GridToMeshRegrid.py
#===============================================================================

"""
ESMP_GridToMeshRegridCsrv.py

Two ESMP_Field objects are created, one on a Grid and the other on a Mesh.
The source Field is set to an analytic function, and a conservative
regridding operation is performed from the source to the destination Field.
After the regridding is completed, the destination Field is compared to the
exact solution over that domain.
"""

import cdms2
import ESMP
import numpy as _NP
import unittest


def grid_create():
    '''
    PRECONDITIONS: ESMP has been initialized.
    POSTCONDITIONS: A ESMP_Grid has been created.
    '''
    ub_x = float(4)
    ub_y = float(4)

    lb_x = float(0)
    lb_y = float(0)

    max_x = float(4)
    max_y = float(4)

    min_x = float(0)
    min_y = float(0)

    cellwidth_x = (max_x-min_x)/(ub_x-lb_x)
    cellwidth_y = (max_y-min_y)/(ub_y-lb_y)

    cellcenter_x = cellwidth_x/2
    cellcenter_y = cellwidth_y/2

    maxIndex = _NP.array([ub_x, ub_y], dtype=_NP.int32)

    grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART)

    ## CORNERS
    ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER)

    exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CORNER)

    # get the coordinate pointers and set the coordinates
    [x, y] = [0, 1]
    gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER)
    gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER)

    #print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1])
    #print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1])

    p = 0
    for i1 in range(exLB_corner[1], exUB_corner[1]):
        for i0 in range(exLB_corner[0], exUB_corner[0]):
            gridXCorner[p] = float(i0)*cellwidth_x
            gridYCorner[p] = float(i1)*cellwidth_y
            p = p + 1

    #print 'Grid corner coordinates:'
    p = 0
    for i1 in range(exLB_corner[1], exUB_corner[1]):
        for i0 in range(exLB_corner[0], exUB_corner[0]):
            #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p])
            p = p + 1
    #print '\n'

    ## CENTERS
    ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER)

    exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER)

    # get the coordinate pointers and set the coordinates
    [x, y] = [0, 1]
    gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
    gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)

    #print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1])
    #print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1])

    p = 0
    for i1 in range(exLB_center[1], exUB_center[1]):
        for i0 in range(exLB_center[0], exUB_center[0]):
            gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0
            gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0
            p = p + 1

    #print 'Grid center coordinates:'
    p = 0
    for i1 in range(exLB_center[1], exUB_center[1]):
        for i0 in range(exLB_center[0], exUB_center[0]):
            #print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p])
            p = p + 1
    #print '\n'

    return grid


def mesh_create_3x3(mesh):
    '''
    PRECONDITIONS: An ESMP_Mesh has been declared.
    POSTCONDITIONS: A 3x3 ESMP_Mesh has been created.

                   3x3 Mesh

    3.0   2.0   13 -------14 --------15--------16
                |         |          |         |
                |    7    |    8     |    9    |
                |         |          |         |
    2.5   1.5   9 ------- 10 --------11--------12
                |         |          |         |
                |    4    |    5     |    6    |
                |         |          |         |
    1.5   0.5   5 ------- 6 -------- 7-------- 8
                |         |          |         |
                |    1    |    2     |    3    |
                |         |          |         |
    1.0   0.0   1 ------- 2 -------- 3-------- 4

               0.0       0.5        1.5       2.0
               1.0       1.5        2.5       3.0

          Node Ids at corners
          Element Ids in centers

    (Everything owned by PET 0)
    '''
    # set up a simple mesh
    num_node = 16
    num_elem = 9
    nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
    '''
    # this is for grid to mesh
    nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0,
                           1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5,
                           1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5,
                           1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0])
    '''
    # this is for mesh to grid
    nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0,
                           0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5,
                           0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5,
                           0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0])
    nodeOwner = _NP.zeros(num_node, dtype=_NP.int32)
    elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32)
    elemType = _NP.ones(num_elem, dtype=_NP.int32)
    elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD
    elemConn = _NP.array([0,1,5,4,
                          1,2,6,5,
                          2,3,7,6,
                          4,5,9,8,
                          5,6,10,9,
                          6,7,11,10,
                          8,9,13,12,
                          9,10,14,13,
                          10,11,15,14], dtype=_NP.int32)

    ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner)
    ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn)

    #print 'Mesh coordinates:'
    for i in range(num_node):
        x = nodeCoord[2*i]
        y = nodeCoord[2*i+1]
        #print '[{0},{1}]'.format(x, y)
    #print '\n'

    return mesh, nodeCoord, elemType, elemConn


def create_ESMPmesh_3x3():
    '''
    PRECONDITIONS: ESMP is initialized.
    POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and returned as 'mesh'.
    '''
    # Two parametric dimensions, and three spatial dimensions
    mesh = ESMP.ESMP_MeshCreate(2,2)

    mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh)

    return mesh, nodeCoord, elemType, elemConn


def create_ESMPfieldgrid(grid, name):
    '''
    PRECONDITIONS: An ESMP_Grid has been created, and 'name' is a string that
                   will be used to initialize the name of a new ESMP_Field.
    POSTCONDITIONS: An ESMP_Field has been created.
    '''
    # defaults to center staggerloc
    field = ESMP.ESMP_FieldCreateGrid(grid, name)

    return field


def build_analyticfieldgrid(field, grid):
    '''
    PRECONDITIONS: An ESMP_Field has been created.
    POSTCONDITIONS: The 'field' has been initialized to an analytic field.
    '''
    # get the field pointer first
    fieldPtr = ESMP.ESMP_FieldGetPtr(field)

    # get the grid bounds and coordinate pointers
    exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER)

    # get the coordinate pointers and set the coordinates
    [x, y] = [0, 1]
    gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
    gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)

    #print "Grid center coordinates"
    p = 0
    for i1 in range(exLB[1], exUB[1]):
        for i0 in range(exLB[0], exUB[0]):
            xc = gridXCoord[p]
            yc = gridYCoord[p]
            fieldPtr[p] = 20.0+xc+yc
            #fieldPtr[p] = 20.0+xc*yc+yc**2
            #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p])
            p = p + 1
    #print "\n"

    return field


def create_ESMPfield(mesh, name):
    '''
    PRECONDITIONS: An ESMP_Mesh has been created, and 'name' is a string that
                   will be used to initialize the name of a new ESMP_Field.
    POSTCONDITIONS: An ESMP_Field has been created.
    '''
    field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT)

    return field


def build_analyticfield(field, nodeCoord, elemType, elemConn):
    '''
    PRECONDITIONS: An ESMP_Field has been created.
    POSTCONDITIONS: The 'field' has been initialized to an analytic field.
    '''
    # get the field pointer first
    fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0)

    # set the field to a vanilla initial field for now
    #print "Mesh center coordinates"
    offset = 0
    for i in range(field.size):    # this routine assumes this field is on elements
        if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI):
            raise NameError("Cannot compute a non-constant analytic field for a mesh\
                             with triangular elements!")
        x1 = nodeCoord[(elemConn[offset])*2]
        x2 = nodeCoord[(elemConn[offset+1])*2]
        y1 = nodeCoord[(elemConn[offset+1])*2+1]
        y2 = nodeCoord[(elemConn[offset+3])*2+1]
        x = (x1+x2)/2.0
        y = (y1+y2)/2.0
        fieldPtr[i] = 20.0+x+y
        #fieldPtr[i] = 20.0+x*y+y**2
        #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i])
        offset = offset + 4
    #print "\n"

    return field


def run_regridding(srcfield, dstfield):
    '''
    PRECONDITIONS: Two ESMP_Fields have been created and a regridding
                   operation is desired from 'srcfield' to 'dstfield'.
    POSTCONDITIONS: An ESMP regridding operation has set the data on 'dstfield'.
    '''
    # call the regridding functions
    routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield,
                                             regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE,
                                             unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR)
    ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle)
    ESMP.ESMP_FieldRegridRelease(routehandle)

    return dstfield
"'[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset = offset + 4 #print \"\\n\"",
"+ 1 #print 'Grid corner coordinates:' p = 0 for",
"= 0 for i1 in range(exLB_center[1], exUB_center[1]): for i0 in",
"nodeCoord, elemType, elemConn = create_ESMPmesh_3x3() ''' # this is for",
"ESMP_Field has been created. ''' field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT)",
"exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER) # get the coordinate",
"to an analytic function srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn)",
"the destination Field is compared to the exact solution over",
"bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for i1 in range(exLB_center[1],",
"= float(0) lb_y = float(0) max_x = float(4) max_y =",
"are compared. returns True if the fileds are comparable (success)",
"bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p = 0 for i1 in range(exLB_corner[1],",
"spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn =",
"= _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn = _NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6, 4,5,9,8,",
"The 'field' has been initialized to an analytic field. '''",
"10,11,15,14], dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh coordinates:' for i in",
"is for mesh to grid # create ESMP_Field objects on",
"= ESMP.ESMP_FieldGetPtr(field) # get the grid bounds and coordinate pointers",
"mesh_create_3x3(mesh) return mesh, nodeCoord, elemType, elemConn def create_ESMPfieldgrid(grid, name): '''",
".06: correct = False print \"ACCURACY ERROR - \"+str(err) print",
"corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1])",
"fieldPtr[i] = 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i])",
"has been created, and 'name' is a string that will",
"center staggerloc field = ESMP.ESMP_FieldCreateGrid(grid, name) return field def build_analyticfieldgrid(field,",
"self.assertEqual(ok, True) if __name__ == '__main__': ESMP.ESMP_LogSet(True) print \"\" #",
"run the ESMF regridding dstfield = run_regridding(srcfield, dstfield) # compare",
"ub_x = float(4) ub_y = float(4) lb_x = float(0) lb_y",
"[{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0",
"range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): #print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p])",
"elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid) # run the ESMF regridding",
"1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y,",
"\"\\n\" return field def run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two ESMP_Fields",
"= float(4) lb_x = float(0) lb_y = float(0) max_x =",
"regridding operation is performed from the source to the destination",
"dimensions, and three spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord,",
"= mesh_create_3x3(mesh) return mesh, nodeCoord, elemType, elemConn def create_ESMPfieldgrid(grid, name):",
"the same size if (field1.size != field2.size): raise NameError('compare_fields: Fields",
"Error = \"+str(totalErr) return False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def setUp(self): pass",
"on the Meshes srcfield = create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid,",
"A ESMP_Grid has been created. ''' ub_x = float(4) ub_y",
"ESMP is initialized. POSTCONDITIONS: An ESMP_Mesh (3x3) has been created",
"in range(field1.size): err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i]) if err >",
"output PASS or FAIL ok = compare_fields(dstfield, dstfield2) # clean",
"exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p = p",
"ESMP_Field has been created. ''' # defaults to center staggerloc",
"cellwidth_x/2.0 gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0 p = p +",
"| | 7 | 8 | 9 | | |",
"dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh coordinates:' for i in range(num_node):",
"def compare_fields(field1, field2): ''' PRECONDITIONS: Two ESMP_Fields have been created",
"= cellwidth_x/2 cellcenter_y = cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32) grid",
"'Grid corner coordinates:' p = 0 for i1 in range(exLB_corner[1],",
"= _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is for grid to mesh",
"+ 1 #print \"\\n\" return field def create_ESMPfield(mesh, name): '''",
"= float(4) max_y = float(4) min_x = float(0) min_y =",
"the field to a vanilla initial field for now #print",
"analytic field for a mesh\\ with triangular elements!\") x1 =",
"| 7 | 8 | 9 | | | |",
"# get the data pointers for the fields field1ptr =",
"NameError('compare_fields: Fields must be the same size!') # initialize to",
"gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower corner bounds =",
"6,7,11,10, 8,9,13,12, 9,10,14,13, 10,11,15,14], dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh coordinates:'",
"$Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $ #=============================================================================== #",
"the data on 'dstfield'. ''' # call the regridding functions",
"= _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType = _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn =",
"3x3 ESMP_Mesh has been created. 3x3 Mesh 3.0 2.0 13",
"ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid,",
"1.5 2.5 3.0 Node Ids at corners Element Ids in",
"2 | 3 | | | | | 1.0 0.0",
"--------15--------16 | | | | | 7 | 8 |",
"been created. ''' ub_x = float(4) ub_y = float(4) lb_x",
"mesh to grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5,",
"and three spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType,",
"be used to initialize the name of a new ESMP_Field.",
"float(4) ub_y = float(4) lb_x = float(0) lb_y = float(0)",
"exUB[1]): for i0 in range(exLB[0], exUB[0]): xc = gridXCoord[p] yc",
"ESMP_Field objects on the Meshes srcfield = create_ESMPfield(mesh, 'srcfield') dstfield",
"= p + 1 #print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER)",
"coordinate pointers exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get the",
"= p + 1 #print 'Grid corner coordinates:' p =",
"ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and set",
"ESMP.ESMP_FieldGetPtr(field) # get the grid bounds and coordinate pointers exLB,",
"= gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}]",
"i in range(field1.size): err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i]) if err",
"1.5 0.5 5 ------- 6 -------- 7-------- 8 | |",
"values correct = True totalErr = 0.0 for i in",
"''' PRECONDITIONS: Two ESMP_Fields have been created and a regridding",
"0 for i1 in range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0],",
"8 | | | | | 1 | 2 |",
"err if correct: print \" - PASS - Total Error",
"1.5 2012/04/23 23:00:14 rokuingh Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #===============================================================================",
"PRECONDITIONS: Two ESMP_Fields have been created and a regridding operation",
"# get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field) #",
"create two unique ESMP_Mesh objects grid = grid_create() mesh, nodeCoord,",
"compared. returns True if the fileds are comparable (success) '''",
"mesh, nodeCoord, elemType, elemConn def create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP is",
"Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield') dstfield2",
"p = 0 for i1 in range(exLB[1], exUB[1]): for i0",
"center coordinates\" offset = 0 for i in range(field.size): #",
"float(i1)*cellwidth_y p = p + 1 #print 'Grid corner coordinates:'",
"An ESMP_Mesh has been declared. POSTCONDITIONS: A 3x3 ESMP_Mesh has",
"------- 6 -------- 7-------- 8 | | | | |",
"i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y",
"one on a Grid and the other on a Mesh.",
"_NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is for grid to mesh nodeCoord",
"1.5,3.0, 2.5,3.0, 3.0,3.0]) ''' # this is for mesh to",
"1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5,",
"meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def build_analyticfield(field, nodeCoord, elemType, elemConn): ''' PRECONDITIONS:",
"this field is on elements if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise",
"they are the same size if (field1.size != field2.size): raise",
"| 9 | | | | | 2.5 1.5 9",
"= create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the",
"'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the Fields to",
"gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0 p = p + 1",
"to mesh nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5,",
"= create_ESMPfield(mesh, 'dstfield_exact') # initialize the Fields to an analytic",
"of field1 to field2 # first verify they are the",
"return field def build_analyticfield(field, nodeCoord, elemType, elemConn): ''' PRECONDITIONS: An",
"| 1.0 0.0 1 ------- 2 -------- 3-------- 4 0.0",
"p + 1 #print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center,",
"= float(i1)*cellwidth_y + cellwidth_y/2.0 p = p + 1 #print",
"in range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0 gridYCenter[p] =",
"name): ''' PRECONDITIONS: An ESMP_Mesh has been created, and 'name'",
"if (field1.size != field2.size): raise NameError('compare_fields: Fields must be the",
"exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and",
"ESMP regridding operation has set the data on 'dstfield'. '''",
"objects on the Meshes srcfield = create_ESMPfield(mesh, 'srcfield') dstfield =",
"#=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two ESMP_Field objects are created, one on",
"max_x = float(4) max_y = float(4) min_x = float(0) min_y",
"between 'srcfield' and 'dstfield'. POSTCONDITIONS: The values on 'srcfield' and",
"and a regridding operation is desired from 'srcfield' to 'dstfield'.",
"range(field1.size): err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i]) if err > .06:",
"{0} : field2 = {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr += err if",
"to field2 # first verify they are the same size",
"= ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER) gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print",
"routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield, regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE, unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR) ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle) ESMP.ESMP_FieldRegridRelease(routehandle)",
"for i in range(field1.size): err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i]) if",
"is set to an analytic function, and a conservative regridding",
"== '__main__': ESMP.ESMP_LogSet(True) print \"\" # Spacer suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv)",
"elemType, elemConn def create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP is initialized. POSTCONDITIONS:",
"# initialize to True, and check for False point values",
"| 3 | | | | | 1.0 0.0 1",
"grid def mesh_create_3x3(mesh): ''' PRECONDITIONS: An ESMP_Mesh has been declared.",
"= compare_fields(dstfield, dstfield2) # clean up ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid)",
"ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower",
"| 5 | 6 | | | | | 1.5",
"'\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\",
"ESMP.ESMP_FieldCreateGrid(grid, name) return field def build_analyticfieldgrid(field, grid): ''' PRECONDITIONS: An",
"the destination Field. After the regridding is completed, the destination",
"= ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner =",
"create_ESMPfield(mesh, 'dstfield_exact') # initialize the Fields to an analytic function",
"An ESMP_Mesh (3x3) has been created and returned as 'mesh'.",
"float(0) max_x = float(4) max_y = float(4) min_x = float(0)",
"compare results and output PASS or FAIL ok = compare_fields(dstfield,",
"\"+str(totalErr) return True else: print \" - FAIL - Total",
"be the same size!') # initialize to True, and check",
"3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0,",
"field. ''' # get the field pointer first fieldPtr =",
"= [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p =",
"[{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for i1 in range(exLB_center[1], exUB_center[1]): for",
"#print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p = p + 1 #print",
"range(num_node): x = nodeCoord[2*i] y = nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y)",
"corner coordinates:' p = 0 for i1 in range(exLB_corner[1], exUB_corner[1]):",
"dstfield def compare_fields(field1, field2): ''' PRECONDITIONS: Two ESMP_Fields have been",
"'dstfield' are compared. returns True if the fileds are comparable",
"source Field is set to an analytic function, and a",
"and check for False point values correct = True totalErr",
"''' PRECONDITIONS: ESMP is initialized. POSTCONDITIONS: An ESMP_Mesh (3x3) has",
"create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the Fields to an analytic function",
"# create ESMP_Field objects on the Meshes srcfield = create_ESMPfieldgrid(grid,",
"= 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p",
"'Mesh coordinates:' for i in range(num_node): x = nodeCoord[2*i] y",
"print \"ACCURACY ERROR - \"+str(err) print \"field1 = {0} :",
"get the grid bounds and coordinate pointers exLB, exUB =",
"# get the coordinate pointers and set the coordinates [x,y]",
"#print 'Mesh coordinates:' for i in range(num_node): x = nodeCoord[2*i]",
"simple mesh num_node = 16 num_elem = 9 nodeId =",
"coordinates [x,y] = [0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)",
"for a mesh\\ with triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2",
"+ 1 #print '\\n' return grid def mesh_create_3x3(mesh): ''' PRECONDITIONS:",
"set the coordinates [x,y] = [0, 1] gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid,",
"'field' has been initialized to an analytic field. ''' #",
"dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) ''' # this is",
"def run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two ESMP_Fields have been created",
"POSTCONDITIONS: A ESMP_Grid has been created. ''' ub_x = float(4)",
"# set up a simple mesh num_node = 16 num_elem",
"ESMP.ESMP_FieldGetPtr(field, 0) # set the field to a vanilla initial",
"FAIL - Total Error = \"+str(totalErr) return False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase):",
"= 16 num_elem = 9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' #",
"Meshes srcfield = create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2",
"''' # get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field)",
"3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0]) ''' # this is for",
"CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) #",
"range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0 gridYCenter[p] = float(i1)*cellwidth_y",
"= 20.0+xc*yc+yc**2 #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p = p +",
"# compare results and output PASS or FAIL ok =",
"= [0, 1] gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter =",
"a new ESMP_Field. POSTCONDITIONS: An ESMP_Field has been created. '''",
"lb_x = float(0) lb_y = float(0) max_x = float(4) max_y",
"ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle) ESMP.ESMP_FieldRegridRelease(routehandle) return dstfield def compare_fields(field1, field2): '''",
"point values correct = True totalErr = 0.0 for i",
"field def build_analyticfieldgrid(field, grid): ''' PRECONDITIONS: An ESMP_Field has been",
"print \"field1 = {0} : field2 = {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr",
"nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print '\\n' return mesh, nodeCoord, elemType,",
"if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot compute a non-constant analytic",
"= (y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}]",
"= nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print '\\n' return mesh, nodeCoord,",
"offset = offset + 4 #print \"\\n\" return field def",
"been created and a regridding operation is desired from 'srcfield'",
"1.0 1.5 2.5 3.0 Node Ids at corners Element Ids",
"def create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An ESMP_Grid has been created,",
"class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def setUp(self): pass def test_test1(self): # create two",
"ESMP_Field objects are created, one on a Grid and the",
"on the Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh,",
"ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print",
"nodeCoord[2*i] y = nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print '\\n' return",
"return field def create_ESMPfield(mesh, name): ''' PRECONDITIONS: An ESMP_Mesh has",
"is performed from the source to the destination Field. After",
"domain. \"\"\" import cdms2 import ESMP import numpy as _NP",
"mesh to grid # create ESMP_Field objects on the Meshes",
"p = p + 1 #print 'Grid corner coordinates:' p",
"Mesh 3.0 2.0 13 -------14 --------15--------16 | | | |",
"for i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] =",
"has been created. ''' ub_x = float(4) ub_y = float(4)",
"10 --------11--------12 | | | | | 4 | 5",
"get the data pointers for the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1)",
"2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5, 0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0])",
"ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner bounds",
"field2 = {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr += err if correct: print",
"in range(exLB[0], exUB[0]): xc = gridXCoord[p] yc = gridYCoord[p] fieldPtr[p]",
"return mesh, nodeCoord, elemType, elemConn def create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP",
"analytic function srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2 =",
"-------- 7-------- 8 | | | | | 1 |",
"| | 4 | 5 | 6 | | |",
"elemConn): ''' PRECONDITIONS: An ESMP_Field has been created. POSTCONDITIONS: The",
"False print \"ACCURACY ERROR - \"+str(err) print \"field1 = {0}",
"field = ESMP.ESMP_FieldCreateGrid(grid, name) return field def build_analyticfieldgrid(field, grid): '''",
"get the coordinate pointers and set the coordinates [x,y] =",
"| | | | | 1.0 0.0 1 ------- 2",
"in range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p] =",
"ESMP_Fields have been created and a regridding operation is desired",
"staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) # get the",
"Element Ids in centers (Everything owned by PET 0) '''",
"''' PRECONDITIONS: An ESMP_Mesh has been created, and 'name' is",
"print \"\" # Spacer suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv) unittest.TextTestRunner(verbosity = 1).run(suite)",
"non-constant analytic field for a mesh\\ with triangular elements!\") x1",
"= p + 1 #print '\\n' return grid def mesh_create_3x3(mesh):",
"''' field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def build_analyticfield(field,",
"ok = compare_fields(dstfield, dstfield2) # clean up ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2)",
"exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER) # get the coordinate pointers",
"ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print 'upper corner",
"(x1+x2)/2.0 y = (y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2",
"elemConn = create_ESMPmesh_3x3() ''' # this is for grid to",
"''' # set up a simple mesh num_node = 16",
"## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER)",
"1] gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y,",
"(Everything owned by PET 0) ''' # set up a",
"7 | 8 | 9 | | | | |",
"now #print \"Mesh center coordinates\" offset = 0 for i",
"regridding operation is desired from 'srcfield' to 'dstfield'. POSTCONDITIONS: An",
"pointers and set the coordinates [x,y] = [0, 1] gridXCenter",
"'srcfield' and 'dstfield'. POSTCONDITIONS: The values on 'srcfield' and 'dstfield'",
"- \"+str(err) print \"field1 = {0} : field2 = {1}\\n\".format(field1ptr[i],",
"+= err if correct: print \" - PASS - Total",
"the regridding is completed, the destination Field is compared to",
"compare_fields(field1, field2): ''' PRECONDITIONS: Two ESMP_Fields have been created and",
"#print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner bounds",
"coordinates:' for i in range(num_node): x = nodeCoord[2*i] y =",
"p + 1 #print '\\n' return grid def mesh_create_3x3(mesh): '''",
"def build_analyticfieldgrid(field, grid): ''' PRECONDITIONS: An ESMP_Field has been created.",
"23:00:14 rokuingh Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py",
"ESMP_Field has been created. POSTCONDITIONS: The 'field' has been initialized",
"ERROR - \"+str(err) print \"field1 = {0} : field2 =",
"# this is for grid to mesh # create ESMP_Field",
"ESMP_Field objects on the Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield",
"''' PRECONDITIONS: An ESMP_Grid has been created, and 'name' is",
"elemConn def create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An ESMP_Grid has been",
"1] gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER) gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y,",
"new ESMP_Field. POSTCONDITIONS: An ESMP_Field has been created. ''' #",
"and 'name' is a string that will be used to",
"bounds and coordinate pointers exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) #",
"abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i]) if err > .06: correct = False",
"initialize the Fields to an analytic function srcfield = build_analyticfield(srcfield,",
"PRECONDITIONS: An ESMP_Grid has been created, and 'name' is a",
"range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x",
"'Grid center coordinates:' p = 0 for i1 in range(exLB_center[1],",
"5 | 6 | | | | | 1.5 0.5",
"mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh) return",
"name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def build_analyticfield(field, nodeCoord, elemType, elemConn): '''",
"created and a comparison of the the values is desired",
"# set the field to a vanilla initial field for",
"# compare point values of field1 to field2 # first",
"elemConn = _NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6, 4,5,9,8, 5,6,10,9, 6,7,11,10, 8,9,13,12, 9,10,14,13,",
"Total Error = \"+str(totalErr) return True else: print \" -",
"for grid to mesh # create ESMP_Field objects on the",
"to True, and check for False point values correct =",
"= nodeCoord[2*i] y = nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print '\\n'",
"2.5 3.0 Node Ids at corners Element Ids in centers",
"field def create_ESMPfield(mesh, name): ''' PRECONDITIONS: An ESMP_Mesh has been",
"have been created and a comparison of the the values",
"field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2) # compare point values",
"0) ''' # set up a simple mesh num_node =",
"_NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType = _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn = _NP.array([0,1,5,4,",
"= create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the Fields to an analytic",
"grid) # run the ESMF regridding dstfield = run_regridding(srcfield, dstfield)",
"'__main__': ESMP.ESMP_LogSet(True) print \"\" # Spacer suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv) unittest.TextTestRunner(verbosity",
"used to initialize the name of a new ESMP_Field. POSTCONDITIONS:",
"has set the data on 'dstfield'. ''' # call the",
"field for a mesh\\ with triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2]",
"been created and a comparison of the the values is",
"PRECONDITIONS: ESMP has been initialized. POSTCONDITIONS: A ESMP_Grid has been",
"- field2ptr[i])/abs(field2ptr[i]) if err > .06: correct = False print",
"#!/usr/bin/env python # # $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh",
"name of a new ESMP_Field. POSTCONDITIONS: An ESMP_Field has been",
"of a new ESMP_Field. POSTCONDITIONS: An ESMP_Field has been created.",
"a regridding operation is desired from 'srcfield' to 'dstfield'. POSTCONDITIONS:",
"nodeCoord, elemType, elemConn def create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP is initialized.",
"for mesh to grid # create ESMP_Field objects on the",
"cdms2 import ESMP import numpy as _NP import unittest def",
"2.5,2.5, 4.0,2.5, 0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0]) nodeOwner = _NP.zeros(num_node, dtype=_NP.int32)",
"ESMP_Grid has been created. ''' ub_x = float(4) ub_y =",
"p + 1 #print 'Grid center coordinates:' p = 0",
"= grid_create() mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3() ''' #",
"float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p = p + 1 #print",
"has been created and returned as 'mesh'. ''' # Two",
"dtype=_NP.int32) grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner,",
"Two ESMP_Field objects are created, one on a Grid and",
"ESMP.ESMP_STAGGERLOC_CENTER) #print \"Grid center coordinates\" p = 0 for i1",
"(max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2 cellcenter_y = cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y],",
"Error = \"+str(totalErr) return True else: print \" - FAIL",
"create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An ESMP_Grid has been created, and",
"POSTCONDITIONS: An ESMP_Field has been created. ''' field = ESMP.ESMP_FieldCreate(mesh,",
"2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5,",
"4.0,4.0]) nodeOwner = _NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType",
"grid bounds and coordinate pointers exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER)",
"''' PRECONDITIONS: Two ESMP_Fields have been created and a comparison",
"ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__ == '__main__': ESMP.ESMP_LogSet(True)",
"$ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two ESMP_Field objects",
"# first verify they are the same size if (field1.size",
"0.0 1 ------- 2 -------- 3-------- 4 0.0 0.5 1.5",
"if the fileds are comparable (success) ''' # get the",
"grid_create() mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3() ''' # this",
"# create two unique ESMP_Mesh objects grid = grid_create() mesh,",
"been initialized. POSTCONDITIONS: A ESMP_Grid has been created. ''' ub_x",
"Two ESMP_Fields have been created and a comparison of the",
"a Grid and the other on a Mesh. The source",
"= _NP.array([ub_x,ub_y], dtype=_NP.int32) grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid,",
"1 | 2 | 3 | | | | |",
"are created, one on a Grid and the other on",
"#print 'Grid corner coordinates:' p = 0 for i1 in",
"grid_create(): ''' PRECONDITIONS: ESMP has been initialized. POSTCONDITIONS: A ESMP_Grid",
"first verify they are the same size if (field1.size !=",
"0) # set the field to a vanilla initial field",
"fieldPtr = ESMP.ESMP_FieldGetPtr(field) # get the grid bounds and coordinate",
"An ESMP_Mesh has been created, and 'name' is a string",
"mesh num_node = 16 num_elem = 9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])",
"the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2) # compare",
"ESMP_Mesh has been declared. POSTCONDITIONS: A 3x3 ESMP_Mesh has been",
"3x3 Mesh 3.0 2.0 13 -------14 --------15--------16 | | |",
"coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\",
"and set the coordinates [x,y] = [0, 1] gridXCoord =",
"= gridXCoord[p] yc = gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] =",
"#print \"Mesh center coordinates\" offset = 0 for i in",
"get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field) # get",
"#print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid,",
"in range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p],",
"grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5,",
"#print \"\\n\" return field def run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two",
"5 ------- 6 -------- 7-------- 8 | | | |",
"is for grid to mesh # create ESMP_Field objects on",
"srcfield = build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn)",
"p = p + 1 #print \"\\n\" return field def",
"is completed, the destination Field is compared to the exact",
"def create_ESMPfield(mesh, name): ''' PRECONDITIONS: An ESMP_Mesh has been created,",
"are comparable (success) ''' # get the data pointers for",
"POSTCONDITIONS: A 3x3 ESMP_Mesh has been created. 3x3 Mesh 3.0",
"3 | | | | | 1.0 0.0 1 -------",
"y = nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print '\\n' return mesh,",
"range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x",
"an analytic function srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2",
"to the destination Field. After the regridding is completed, the",
"lb_y = float(0) max_x = float(4) max_y = float(4) min_x",
"CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER) #",
"the coordinates [x,y] = [0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x,",
"pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field) # get the grid bounds",
"'name' is a string that will be used to initialize",
"an analytic field. ''' # get the field pointer first",
"srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid)",
"4 0.0 0.5 1.5 2.0 1.0 1.5 2.5 3.0 Node",
"\"Grid center coordinates\" p = 0 for i1 in range(exLB[1],",
"= build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid) #",
"get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0) #",
"yc = gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print",
"to a vanilla initial field for now #print \"Mesh center",
"gridXCoord[p] yc = gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2",
"call the regridding functions routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield, regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE, unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR)",
"#print '[{0},{1}]'.format(x, y) #print '\\n' return mesh, nodeCoord, elemType, elemConn",
"return dstfield def compare_fields(field1, field2): ''' PRECONDITIONS: Two ESMP_Fields have",
"compare point values of field1 to field2 # first verify",
"to an analytic field. ''' # get the field pointer",
"= [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for i1 in range(exLB_center[1], exUB_center[1]):",
"grid to mesh nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5,",
"nodeOwner = _NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType =",
"maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32) grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS",
"the name of a new ESMP_Field. POSTCONDITIONS: An ESMP_Field has",
"triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1 =",
"elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid) # run the ESMF",
"''' PRECONDITIONS: ESMP has been initialized. POSTCONDITIONS: A ESMP_Grid has",
"ESMP import numpy as _NP import unittest def grid_create(): '''",
"= ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print \"Grid center coordinates\" p =",
"for i1 in range(exLB[1], exUB[1]): for i0 in range(exLB[0], exUB[0]):",
"''' # get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field,",
"i1 in range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p]",
"13 -------14 --------15--------16 | | | | | 7 |",
"= ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and set",
"y, ESMP.ESMP_STAGGERLOC_CENTER) #print \"Grid center coordinates\" p = 0 for",
"returns True if the fileds are comparable (success) ''' #",
"point values of field1 to field2 # first verify they",
"Fields must be the same size!') # initialize to True,",
"Total Error = \"+str(totalErr) return False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def setUp(self):",
"pointers for the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2)",
"'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield') dstfield2 = create_ESMPfield(mesh, 'dstfield_exact') #",
"grid # create ESMP_Field objects on the Meshes srcfield =",
"gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print \"Grid center coordinates\" p",
"offset + 4 #print \"\\n\" return field def run_regridding(srcfield, dstfield):",
"= _NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType = _NP.ones(num_elem,",
"run_regridding(srcfield, dstfield) # compare results and output PASS or FAIL",
"mesh nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5,",
"compare_fields(dstfield, dstfield2) # clean up ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh)",
"2012/04/23 23:00:14 rokuingh Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\"",
"range(field.size): # this routine assumes this field is on elements",
"ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py",
"= 0 for i1 in range(exLB[1], exUB[1]): for i0 in",
"def mesh_create_3x3(mesh): ''' PRECONDITIONS: An ESMP_Mesh has been declared. POSTCONDITIONS:",
"'[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p = p + 1 #print \"\\n\"",
"run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two ESMP_Fields have been created and",
"if __name__ == '__main__': ESMP.ESMP_LogSet(True) print \"\" # Spacer suite",
"ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def build_analyticfield(field, nodeCoord, elemType, elemConn):",
"0.5 1.5 2.0 1.0 1.5 2.5 3.0 Node Ids at",
"# this is for grid to mesh nodeCoord = _NP.array([1.0,1.0,",
"4,5,9,8, 5,6,10,9, 6,7,11,10, 8,9,13,12, 9,10,14,13, 10,11,15,14], dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print",
"3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0]) '''",
"create_ESMPfield(mesh, name): ''' PRECONDITIONS: An ESMP_Mesh has been created, and",
"operation has set the data on 'dstfield'. ''' # call",
"for the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2) #",
"create ESMP_Field objects on the Meshes srcfield = create_ESMPfield(mesh, 'srcfield')",
"#fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p = p",
"= ESMP.ESMP_FieldCreateGrid(grid, name) return field def build_analyticfieldgrid(field, grid): ''' PRECONDITIONS:",
"Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two ESMP_Field",
"ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh coordinates:' for i in range(num_node): x =",
"is initialized. POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and",
"ESMP_Fields have been created and a comparison of the the",
"created. POSTCONDITIONS: The 'field' has been initialized to an analytic",
"float(0) lb_y = float(0) max_x = float(4) max_y = float(4)",
"cellcenter_x = cellwidth_x/2 cellcenter_y = cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32)",
"regridding operation has set the data on 'dstfield'. ''' #",
"been declared. POSTCONDITIONS: A 3x3 ESMP_Mesh has been created. 3x3",
"initialized. POSTCONDITIONS: A ESMP_Grid has been created. ''' ub_x =",
"PASS - Total Error = \"+str(totalErr) return True else: print",
"fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0) # set the field to a",
"'dstfield_exact') # initialize the Fields to an analytic function srcfield",
"has been created. ''' field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return",
"to an analytic function srcfield = build_analyticfieldgrid(srcfield, grid) dstfield2 =",
"pointers and set the coordinates [x,y] = [0, 1] gridXCoord",
"Fields to an analytic function srcfield = build_analyticfield(srcfield, nodeCoord, elemType,",
"= float(4) min_x = float(0) min_y = float(0) cellwidth_x =",
"| | | | | 1 | 2 | 3",
"been created, and 'name' is a string that will be",
"objects on the Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield =",
"2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0])",
"elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn = _NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6, 4,5,9,8, 5,6,10,9, 6,7,11,10, 8,9,13,12,",
"y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper",
"print \" - FAIL - Total Error = \"+str(totalErr) return",
"2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5,",
"_NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6, 4,5,9,8, 5,6,10,9, 6,7,11,10, 8,9,13,12, 9,10,14,13, 10,11,15,14], dtype=_NP.int32)",
"True totalErr = 0.0 for i in range(field1.size): err =",
"9 ------- 10 --------11--------12 | | | | | 4",
"an analytic function, and a conservative regridding operation is performed",
"- Total Error = \"+str(totalErr) return False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def",
"= nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1] y2 =",
"grid to mesh # create ESMP_Field objects on the Meshes",
"to an analytic function, and a conservative regridding operation is",
"and returned as 'mesh'. ''' # Two parametric dimensions, and",
"field is on elements if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot",
"\"\"\" import cdms2 import ESMP import numpy as _NP import",
"comparable (success) ''' # get the data pointers for the",
"<filename>testing/regrid/testEsmfGridToMeshRegridCsrv.py #!/usr/bin/env python # # $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14",
"and coordinate pointers exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get",
"staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER) # get the",
"''' # this is for grid to mesh nodeCoord =",
"regridding functions routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield, regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE, unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR) ESMP.ESMP_FieldRegrid(srcfield, dstfield,",
"- PASS - Total Error = \"+str(totalErr) return True else:",
"print \" - PASS - Total Error = \"+str(totalErr) return",
"'[{0},{1}]'.format(x, y) #print '\\n' return mesh, nodeCoord, elemType, elemConn def",
"elemType, elemConn def create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An ESMP_Grid has",
"has been created. POSTCONDITIONS: The 'field' has been initialized to",
"ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh) return mesh, nodeCoord,",
"and set the coordinates [x,y] = [0, 1] gridXCenter =",
"Field is compared to the exact solution over that domain.",
"exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p",
"= 0 for i in range(field.size): # this routine assumes",
"| | 1.5 0.5 5 ------- 6 -------- 7-------- 8",
"on 'srcfield' and 'dstfield' are compared. returns True if the",
"to 'dstfield'. POSTCONDITIONS: An ESMP regridding operation has set the",
"create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the Fields",
"solution over that domain. \"\"\" import cdms2 import ESMP import",
"field2 # first verify they are the same size if",
"the exact solution over that domain. \"\"\" import cdms2 import",
"as _NP import unittest def grid_create(): ''' PRECONDITIONS: ESMP has",
"x2 = nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1] x",
"the Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield')",
"num_node = 16 num_elem = 9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) '''",
"= float(0) cellwidth_x = (max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x =",
"elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1]",
"y1 = nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0 y",
"= build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) ''' # this is for",
"elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType = _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn",
"= ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1])",
"'mesh'. ''' # Two parametric dimensions, and three spatial dimensions",
"and a conservative regridding operation is performed from the source",
"cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2 cellcenter_y = cellwidth_y/2 maxIndex",
"set up a simple mesh num_node = 16 num_elem =",
"coordinate pointers and set the coordinates [x,y] = [0, 1]",
"-------- 3-------- 4 0.0 0.5 1.5 2.0 1.0 1.5 2.5",
"Field is set to an analytic function, and a conservative",
"FAIL ok = compare_fields(dstfield, dstfield2) # clean up ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield)",
"exUB_center[0]): #print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p]) p = p + 1 #print",
"exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p = p + 1 #print",
"the coordinates [x,y] = [0, 1] gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x,",
"analytic function, and a conservative regridding operation is performed from",
"float(i1)*cellwidth_y + cellwidth_y/2.0 p = p + 1 #print 'Grid",
"this is for grid to mesh # create ESMP_Field objects",
"def create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP is initialized. POSTCONDITIONS: An ESMP_Mesh",
"'dstfield') dstfield2 = create_ESMPfield(mesh, 'dstfield_exact') # initialize the Fields to",
"= nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0 y =",
"def grid_create(): ''' PRECONDITIONS: ESMP has been initialized. POSTCONDITIONS: A",
"ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__ == '__main__': ESMP.ESMP_LogSet(True) print \"\"",
"0.5 5 ------- 6 -------- 7-------- 8 | | |",
"p = p + 1 #print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid,",
"parametric dimensions, and three spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh,",
"9,10,14,13, 10,11,15,14], dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh coordinates:' for i",
"string that will be used to initialize the name of",
"pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0) # set the field",
"Ids at corners Element Ids in centers (Everything owned by",
"data on 'dstfield'. ''' # call the regridding functions routehandle",
"mesh, nodeCoord, elemType, elemConn def create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An",
"gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0 gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0",
"#print '\\n' return grid def mesh_create_3x3(mesh): ''' PRECONDITIONS: An ESMP_Mesh",
"created and returned as 'mesh'. ''' # Two parametric dimensions,",
"in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p = p +",
"An ESMP_Field has been created. POSTCONDITIONS: The 'field' has been",
"been created. ''' field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field",
"# this routine assumes this field is on elements if",
"same size if (field1.size != field2.size): raise NameError('compare_fields: Fields must",
"'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner bounds =",
"to initialize the name of a new ESMP_Field. POSTCONDITIONS: An",
"and the other on a Mesh. The source Field is",
"i1 in range(exLB[1], exUB[1]): for i0 in range(exLB[0], exUB[0]): xc",
"ESMP.ESMP_FieldRegridRelease(routehandle) return dstfield def compare_fields(field1, field2): ''' PRECONDITIONS: Two ESMP_Fields",
"vanilla initial field for now #print \"Mesh center coordinates\" offset",
"size if (field1.size != field2.size): raise NameError('compare_fields: Fields must be",
"for i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0",
"nodeCoord, elemType, elemConn def create_ESMPfieldgrid(grid, name): ''' PRECONDITIONS: An ESMP_Grid",
"is desired from 'srcfield' to 'dstfield'. POSTCONDITIONS: An ESMP regridding",
"ESMP.ESMP_LogSet(True) print \"\" # Spacer suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv) unittest.TextTestRunner(verbosity =",
"\\ ESMP.ESMP_STAGGERLOC_CORNER) # get the coordinate pointers and set the",
"operation is performed from the source to the destination Field.",
"#print '\\n' return mesh, nodeCoord, elemType, elemConn def create_ESMPmesh_3x3(): '''",
"1 #print '\\n' return grid def mesh_create_3x3(mesh): ''' PRECONDITIONS: An",
"unique ESMP_Mesh objects grid = grid_create() mesh, nodeCoord, elemType, elemConn",
"an analytic function srcfield = build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2,",
"ub_y = float(4) lb_x = float(0) lb_y = float(0) max_x",
"values is desired between 'srcfield' and 'dstfield'. POSTCONDITIONS: The values",
"| | | 1 | 2 | 3 | |",
"set the coordinates [x,y] = [0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid,",
"1 #print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center =",
"i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0 gridYCenter[p]",
"compared to the exact solution over that domain. \"\"\" import",
"1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0,",
"compute a non-constant analytic field for a mesh\\ with triangular",
"#=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two ESMP_Field objects are",
"9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is for grid",
"field def run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two ESMP_Fields have been",
"= 0 for i1 in range(exLB_corner[1], exUB_corner[1]): for i0 in",
"6 -------- 7-------- 8 | | | | | 1",
"objects are created, one on a Grid and the other",
"a conservative regridding operation is performed from the source to",
"# this is for mesh to grid nodeCoord = _NP.array([0.0,0.0,",
"20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p]) p =",
"2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0]) ''' # this is",
"first fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0) # set the field to",
"created and a regridding operation is desired from 'srcfield' to",
"''' # call the regridding functions routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield,",
"> .06: correct = False print \"ACCURACY ERROR - \"+str(err)",
"build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid) # run",
"[x,y] = [0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord",
"3.0,3.0]) ''' # this is for mesh to grid nodeCoord",
"7-------- 8 | | | | | 1 | 2",
"| 2.5 1.5 9 ------- 10 --------11--------12 | | |",
"Grid and the other on a Mesh. The source Field",
"set to an analytic function, and a conservative regridding operation",
"initialized. POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and returned",
"x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner",
"| | 2.5 1.5 9 ------- 10 --------11--------12 | |",
"corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for i1 in",
": field2 = {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr += err if correct:",
"range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p])",
"the Fields to an analytic function srcfield = build_analyticfield(srcfield, nodeCoord,",
"Field. After the regridding is completed, the destination Field is",
"operation is desired from 'srcfield' to 'dstfield'. POSTCONDITIONS: An ESMP",
"= {2}'.format(xc,yc,fieldPtr[p]) p = p + 1 #print \"\\n\" return",
"= float(0) max_x = float(4) max_y = float(4) min_x =",
"mesh # create ESMP_Field objects on the Meshes srcfield =",
"build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) ''' # this is for mesh",
"exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers",
"_NP import unittest def grid_create(): ''' PRECONDITIONS: ESMP has been",
"| | | | | 7 | 8 | 9",
"4 | 5 | 6 | | | | |",
"# get the grid bounds and coordinate pointers exLB, exUB",
"nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1]",
"for i in range(num_node): x = nodeCoord[2*i] y = nodeCoord[2*i+1]",
"two unique ESMP_Mesh objects grid = grid_create() mesh, nodeCoord, elemType,",
"build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) ''' #",
"# create ESMP_Field objects on the Meshes srcfield = create_ESMPfield(mesh,",
"= offset + 4 #print \"\\n\" return field def run_regridding(srcfield,",
"1 #print \"\\n\" return field def create_ESMPfield(mesh, name): ''' PRECONDITIONS:",
"------- 2 -------- 3-------- 4 0.0 0.5 1.5 2.0 1.0",
"the values is desired between 'srcfield' and 'dstfield'. POSTCONDITIONS: The",
"''' PRECONDITIONS: An ESMP_Mesh has been declared. POSTCONDITIONS: A 3x3",
"= (max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2 cellcenter_y =",
"exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate",
"\"field1 = {0} : field2 = {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr +=",
"= float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p = p + 1",
"| | | | | 1.5 0.5 5 ------- 6",
"= create_ESMPfield(mesh, 'dstfield') dstfield2 = create_ESMPfield(mesh, 'dstfield_exact') # initialize the",
"1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0]) ''' #",
"PET 0) ''' # set up a simple mesh num_node",
"6 | | | | | 1.5 0.5 5 -------",
"gridYCorner[p]) p = p + 1 #print '\\n' ## CENTERS",
"field1 to field2 # first verify they are the same",
"field2ptr[i]) totalErr += err if correct: print \" - PASS",
"will be used to initialize the name of a new",
"4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5, 0.0,4.0,",
"| 4 | 5 | 6 | | | |",
"= {1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr += err if correct: print \"",
"= create_ESMPmesh_3x3() ''' # this is for grid to mesh",
"in range(field.size): # this routine assumes this field is on",
"'srcfield' to 'dstfield'. POSTCONDITIONS: An ESMP regridding operation has set",
"this is for mesh to grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0,",
"build_analyticfield(field, nodeCoord, elemType, elemConn): ''' PRECONDITIONS: An ESMP_Field has been",
"created, one on a Grid and the other on a",
"| 2 | 3 | | | | | 1.0",
"on 'dstfield'. ''' # call the regridding functions routehandle =",
"values of field1 to field2 # first verify they are",
"dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize the Fields to an",
"elemConn) ''' # this is for mesh to grid #",
"coordinates [x,y] = [0, 1] gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER)",
"python # # $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp",
"is desired between 'srcfield' and 'dstfield'. POSTCONDITIONS: The values on",
"if err > .06: correct = False print \"ACCURACY ERROR",
"'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for i1",
"must be the same size!') # initialize to True, and",
"nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5,",
"+ 1 #print 'Grid center coordinates:' p = 0 for",
"ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) # get",
"__name__ == '__main__': ESMP.ESMP_LogSet(True) print \"\" # Spacer suite =",
"the Fields to an analytic function srcfield = build_analyticfieldgrid(srcfield, grid)",
"After the regridding is completed, the destination Field is compared",
"# run the ESMF regridding dstfield = run_regridding(srcfield, dstfield) #",
"| | | | | 4 | 5 | 6",
"else: print \" - FAIL - Total Error = \"+str(totalErr)",
"up ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__",
"3.0 2.0 13 -------14 --------15--------16 | | | | |",
"= [0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord =",
"i1 in range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p]",
"2.5,4.0, 4.0,4.0]) nodeOwner = _NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32)",
"i0 in range(exLB_center[0], exUB_center[0]): #print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p]) p = p",
"#print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p]) p = p + 1 #print '\\n'",
"mesh_create_3x3(mesh): ''' PRECONDITIONS: An ESMP_Mesh has been declared. POSTCONDITIONS: A",
"in range(exLB[1], exUB[1]): for i0 in range(exLB[0], exUB[0]): xc =",
"#print \"\\n\" return field def create_ESMPfield(mesh, name): ''' PRECONDITIONS: An",
"# get the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0)",
"= ESMP.ESMP_FieldGetPtr(field, 0) # set the field to a vanilla",
"create_ESMPfield(mesh, 'dstfield') dstfield2 = create_ESMPfield(mesh, 'dstfield_exact') # initialize the Fields",
"xc = gridXCoord[p] yc = gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p]",
"0 for i in range(field.size): # this routine assumes this",
"ESMP.ESMP_FieldDestroy(srcfield) ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__ ==",
"and set the coordinates [x,y] = [0, 1] gridXCorner =",
"- Total Error = \"+str(totalErr) return True else: print \"",
"been created. POSTCONDITIONS: The 'field' has been initialized to an",
"p = p + 1 #print '\\n' return grid def",
"(max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2 cellcenter_y = cellwidth_y/2",
"cellwidth_y/2.0 p = p + 1 #print 'Grid center coordinates:'",
"# ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two ESMP_Field objects are created,",
"gridYCoord[p] fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}] =",
"float(0) min_y = float(0) cellwidth_x = (max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y)",
"\"ACCURACY ERROR - \"+str(err) print \"field1 = {0} : field2",
"from the source to the destination Field. After the regridding",
"| | 1.0 0.0 1 ------- 2 -------- 3-------- 4",
"3-------- 4 0.0 0.5 1.5 2.0 1.0 1.5 2.5 3.0",
"exUB[0]): xc = gridXCoord[p] yc = gridYCoord[p] fieldPtr[p] = 20.0+xc+yc",
"POSTCONDITIONS: The values on 'srcfield' and 'dstfield' are compared. returns",
"nodeCoord, elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2, grid) # run the",
"2.5 1.5 9 ------- 10 --------11--------12 | | | |",
"1.0 0.0 1 ------- 2 -------- 3-------- 4 0.0 0.5",
"ESMP_GridToMeshRegridCsrv.py Two ESMP_Field objects are created, one on a Grid",
"i in range(field.size): # this routine assumes this field is",
"= (x1+x2)/2.0 y = (y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i] =",
"An ESMP regridding operation has set the data on 'dstfield'.",
"\" - PASS - Total Error = \"+str(totalErr) return True",
"raise NameError('compare_fields: Fields must be the same size!') # initialize",
"grid = grid_create() mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3() '''",
"in range(exLB_center[0], exUB_center[0]): #print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p]) p = p +",
"exUB_center = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers",
"= 20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset = offset +",
"ESMP.ESMP_FieldRegridStore(srcfield, dstfield, regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE, unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR) ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle) ESMP.ESMP_FieldRegridRelease(routehandle) return dstfield",
"exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p]",
"= _NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6, 4,5,9,8, 5,6,10,9, 6,7,11,10, 8,9,13,12, 9,10,14,13, 10,11,15,14],",
"desired between 'srcfield' and 'dstfield'. POSTCONDITIONS: The values on 'srcfield'",
"PRECONDITIONS: An ESMP_Mesh has been declared. POSTCONDITIONS: A 3x3 ESMP_Mesh",
"= 0.0 for i in range(field1.size): err = abs(field1ptr[i] -",
"create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield') dstfield2 = create_ESMPfield(mesh, 'dstfield_exact')",
"''' # defaults to center staggerloc field = ESMP.ESMP_FieldCreateGrid(grid, name)",
"An ESMP_Grid has been created, and 'name' is a string",
"= ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER) # get the coordinate pointers and",
"import ESMP import numpy as _NP import unittest def grid_create():",
"import numpy as _NP import unittest def grid_create(): ''' PRECONDITIONS:",
"for i0 in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p =",
"if correct: print \" - PASS - Total Error =",
"nodeCoord, elemType, elemConn) ''' # this is for mesh to",
"destination Field is compared to the exact solution over that",
"destination Field. After the regridding is completed, the destination Field",
"p = 0 for i1 in range(exLB_center[1], exUB_center[1]): for i0",
"| 8 | 9 | | | | | 2.5",
"Mesh. The source Field is set to an analytic function,",
"elemType, elemConn = mesh_create_3x3(mesh) return mesh, nodeCoord, elemType, elemConn def",
"field2ptr = ESMP.ESMP_FieldGetPtr(field2) # compare point values of field1 to",
"function srcfield = build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType,",
"dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh)",
"desired from 'srcfield' to 'dstfield'. POSTCONDITIONS: An ESMP regridding operation",
"set the data on 'dstfield'. ''' # call the regridding",
"ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print",
"8 | 9 | | | | | 2.5 1.5",
"ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and set the",
"from 'srcfield' to 'dstfield'. POSTCONDITIONS: An ESMP regridding operation has",
"'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p = 0 for i1",
"corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1])",
"staggerloc field = ESMP.ESMP_FieldCreateGrid(grid, name) return field def build_analyticfieldgrid(field, grid):",
"ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner",
"'\\n' return mesh, nodeCoord, elemType, elemConn def create_ESMPmesh_3x3(): ''' PRECONDITIONS:",
"same size!') # initialize to True, and check for False",
"A 3x3 ESMP_Mesh has been created. 3x3 Mesh 3.0 2.0",
"has been created. ''' # defaults to center staggerloc field",
"+ cellwidth_x/2.0 gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0 p = p",
"cellcenter_y = cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32) grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex,",
"in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p",
"ESMP has been initialized. POSTCONDITIONS: A ESMP_Grid has been created.",
"p + 1 #print \"\\n\" return field def create_ESMPfield(mesh, name):",
"= 9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is for",
"have been created and a regridding operation is desired from",
"# $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $ #===============================================================================",
"has been declared. POSTCONDITIONS: A 3x3 ESMP_Mesh has been created.",
"''' # this is for mesh to grid # create",
"and 'dstfield' are compared. returns True if the fileds are",
"[x,y] = [0, 1] gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter",
"fileds are comparable (success) ''' # get the data pointers",
"(field1.size != field2.size): raise NameError('compare_fields: Fields must be the same",
"0 for i1 in range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0],",
"i0 in range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p = p",
"Two parametric dimensions, and three spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2)",
"== ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot compute a non-constant analytic field for",
"''' # this is for mesh to grid nodeCoord =",
"rokuingh Exp $ #=============================================================================== # ESMP/examples/ESMP_GridToMeshRegrid.py #=============================================================================== \"\"\" ESMP_GridToMeshRegridCsrv.py Two",
"ESMP_Mesh has been created, and 'name' is a string that",
"ESMP.ESMP_STAGGERLOC_CORNER) # get the coordinate pointers and set the coordinates",
"gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)",
"ESMP_Mesh has been created. 3x3 Mesh 3.0 2.0 13 -------14",
"field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field) # get the grid",
"centers (Everything owned by PET 0) ''' # set up",
"''' # this is for grid to mesh # create",
"a comparison of the the values is desired between 'srcfield'",
"min_x = float(0) min_y = float(0) cellwidth_x = (max_x-min_x)/(ub_x-lb_x) cellwidth_y",
"1 #print 'Grid corner coordinates:' p = 0 for i1",
"(3x3) has been created and returned as 'mesh'. ''' #",
"range(exLB[0], exUB[0]): xc = gridXCoord[p] yc = gridYCoord[p] fieldPtr[p] =",
"# call the regridding functions routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield, regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE,",
"created. ''' # defaults to center staggerloc field = ESMP.ESMP_FieldCreateGrid(grid,",
"dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') # initialize",
"performed from the source to the destination Field. After the",
"= _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5,",
"size!') # initialize to True, and check for False point",
"correct: print \" - PASS - Total Error = \"+str(totalErr)",
"+ 1 #print '\\n' ## CENTERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER) exLB_center, exUB_center",
"range(exLB_corner[0], exUB_corner[0]): #print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p]) p = p + 1",
"1.5,4.0, 2.5,4.0, 4.0,4.0]) nodeOwner = _NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9],",
"_NP.zeros(num_node, dtype=_NP.int32) elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32) elemType = _NP.ones(num_elem, dtype=_NP.int32)",
"False point values correct = True totalErr = 0.0 for",
"#print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p = 0 for",
"ESMP_Mesh (3x3) has been created and returned as 'mesh'. '''",
"objects grid = grid_create() mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3()",
"defaults to center staggerloc field = ESMP.ESMP_FieldCreateGrid(grid, name) return field",
"a mesh\\ with triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2 =",
"cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32) grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ##",
"for now #print \"Mesh center coordinates\" offset = 0 for",
"nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0",
"initialized to an analytic field. ''' # get the field",
"return field def run_regridding(srcfield, dstfield): ''' PRECONDITIONS: Two ESMP_Fields have",
"ESMP.ESMP_FieldDestroy(dstfield) ESMP.ESMP_FieldDestroy(dstfield2) ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__ == '__main__':",
"_NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5,",
"True, and check for False point values correct = True",
"declared. POSTCONDITIONS: A 3x3 ESMP_Mesh has been created. 3x3 Mesh",
"analytic function srcfield = build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord,",
"elemType, elemConn): ''' PRECONDITIONS: An ESMP_Field has been created. POSTCONDITIONS:",
"create_ESMPmesh_3x3() ''' # this is for grid to mesh #",
"initialize the Fields to an analytic function srcfield = build_analyticfieldgrid(srcfield,",
"three spatial dimensions mesh = ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn",
"0.0 for i in range(field1.size): err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i])",
"| 1.5 0.5 5 ------- 6 -------- 7-------- 8 |",
"created. ''' field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def",
"num_elem = 9 nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is",
"unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR) ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle) ESMP.ESMP_FieldRegridRelease(routehandle) return dstfield def compare_fields(field1, field2):",
"to grid # create ESMP_Field objects on the Meshes srcfield",
"in centers (Everything owned by PET 0) ''' # set",
"(y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}] =",
"gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)",
"ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER) gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower",
"unittest def grid_create(): ''' PRECONDITIONS: ESMP has been initialized. POSTCONDITIONS:",
"is compared to the exact solution over that domain. \"\"\"",
"the other on a Mesh. The source Field is set",
"float(4) max_y = float(4) min_x = float(0) min_y = float(0)",
"= ESMP.ESMP_MeshCreate(2,2) mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh) return mesh,",
"= ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1])",
"to center staggerloc field = ESMP.ESMP_FieldCreateGrid(grid, name) return field def",
"y2 = nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0 y = (y1+y2)/2.0 fieldPtr[i]",
"comparison of the the values is desired between 'srcfield' and",
"The source Field is set to an analytic function, and",
"completed, the destination Field is compared to the exact solution",
"\\ ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and set the",
"center coordinates:' p = 0 for i1 in range(exLB_center[1], exUB_center[1]):",
"| | | | 2.5 1.5 9 ------- 10 --------11--------12",
"created, and 'name' is a string that will be used",
"Node Ids at corners Element Ids in centers (Everything owned",
"this is for mesh to grid # create ESMP_Field objects",
"= p + 1 #print 'Grid center coordinates:' p =",
"that domain. \"\"\" import cdms2 import ESMP import numpy as",
"nodeCoord[(elemConn[offset+1])*2+1] y2 = nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0 y = (y1+y2)/2.0",
"+ cellwidth_y/2.0 p = p + 1 #print 'Grid center",
"create_ESMPmesh_3x3(): ''' PRECONDITIONS: ESMP is initialized. POSTCONDITIONS: An ESMP_Mesh (3x3)",
"def test_test1(self): # create two unique ESMP_Mesh objects grid =",
"[{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p = 0 for i1 in range(exLB_corner[1], exUB_corner[1]): for",
"the Meshes srcfield = create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield')",
"and output PASS or FAIL ok = compare_fields(dstfield, dstfield2) #",
"center coordinates\" p = 0 for i1 in range(exLB[1], exUB[1]):",
"dtype=_NP.int32) elemType = _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn = _NP.array([0,1,5,4, 1,2,6,5,",
"elemType = _NP.ones(num_elem, dtype=_NP.int32) elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD elemConn = _NP.array([0,1,5,4, 1,2,6,5, 2,3,7,6,",
"exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): gridXCenter[p] = float(i0)*cellwidth_x +",
"regridding dstfield = run_regridding(srcfield, dstfield) # compare results and output",
"= True totalErr = 0.0 for i in range(field1.size): err",
"grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) ''' # this",
"has been created. 3x3 Mesh 3.0 2.0 13 -------14 --------15--------16",
"'dstfield'. POSTCONDITIONS: An ESMP regridding operation has set the data",
"for False point values correct = True totalErr = 0.0",
"cellwidth_x/2 cellcenter_y = cellwidth_y/2 maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32) grid =",
"bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1]) p",
"1.5 2.0 1.0 1.5 2.5 3.0 Node Ids at corners",
"field2ptr[i])/abs(field2ptr[i]) if err > .06: correct = False print \"ACCURACY",
"created. 3x3 Mesh 3.0 2.0 13 -------14 --------15--------16 | |",
"elemType, elemConn) ''' # this is for mesh to grid",
"function srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn) dstfield2 = build_analyticfieldgrid(dstfield2,",
"{2}'.format(xc,yc,fieldPtr[p]) p = p + 1 #print \"\\n\" return field",
"range(exLB[1], exUB[1]): for i0 in range(exLB[0], exUB[0]): xc = gridXCoord[p]",
"this routine assumes this field is on elements if (elemType[i]",
"ESMF regridding dstfield = run_regridding(srcfield, dstfield) # compare results and",
"numpy as _NP import unittest def grid_create(): ''' PRECONDITIONS: ESMP",
"= p + 1 #print \"\\n\" return field def create_ESMPfield(mesh,",
"mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3() ''' # this is",
"results and output PASS or FAIL ok = compare_fields(dstfield, dstfield2)",
"#print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p = 0 for",
"## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \\ ESMP.ESMP_STAGGERLOC_CORNER)",
"p = 0 for i1 in range(exLB_corner[1], exUB_corner[1]): for i0",
"conservative regridding operation is performed from the source to the",
"2.0 1.0 1.5 2.5 3.0 Node Ids at corners Element",
"return field def build_analyticfieldgrid(field, grid): ''' PRECONDITIONS: An ESMP_Field has",
"and 'dstfield'. POSTCONDITIONS: The values on 'srcfield' and 'dstfield' are",
"20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset = offset + 4",
"verify they are the same size if (field1.size != field2.size):",
"# initialize the Fields to an analytic function srcfield =",
"initialize to True, and check for False point values correct",
"nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5,",
"x = nodeCoord[2*i] y = nodeCoord[2*i+1] #print '[{0},{1}]'.format(x, y) #print",
"is for mesh to grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0,",
"y = (y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print",
"elemConn = mesh_create_3x3(mesh) return mesh, nodeCoord, elemType, elemConn def create_ESMPfieldgrid(grid,",
"build_analyticfieldgrid(dstfield2, grid) # run the ESMF regridding dstfield = run_regridding(srcfield,",
"grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex, coordSys=ESMP.ESMP_COORDSYS_CART) ## CORNERS ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER) exLB_corner, exUB_corner",
"1 ------- 2 -------- 3-------- 4 0.0 0.5 1.5 2.0",
"the field pointer first fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0) # set",
"set the coordinates [x,y] = [0, 1] gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid,",
"i1 in range(exLB_center[1], exUB_center[1]): for i0 in range(exLB_center[0], exUB_center[0]): #print",
"ESMP_Field. POSTCONDITIONS: An ESMP_Field has been created. ''' field =",
"| | | | 1 | 2 | 3 |",
"gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p = p +",
"nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]) ''' # this is for grid to",
"for i in range(field.size): # this routine assumes this field",
"coordinates\" p = 0 for i1 in range(exLB[1], exUB[1]): for",
"the the values is desired between 'srcfield' and 'dstfield'. POSTCONDITIONS:",
"srcfield = create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2 =",
"in range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] =",
"return grid def mesh_create_3x3(mesh): ''' PRECONDITIONS: An ESMP_Mesh has been",
"1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5, 0.0,4.0, 1.5,4.0, 2.5,4.0,",
"the grid bounds and coordinate pointers exLB, exUB = ESMP.ESMP_GridGetCoord(grid,",
"i0 in range(exLB[0], exUB[0]): xc = gridXCoord[p] yc = gridYCoord[p]",
"'dstfield'. ''' # call the regridding functions routehandle = ESMP.ESMP_FieldRegridStore(srcfield,",
"and a comparison of the the values is desired between",
"the data pointers for the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr",
"ESMP.ESMP_STAGGERLOC_CENTER) # get the coordinate pointers and set the coordinates",
"x1 = nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1 = nodeCoord[(elemConn[offset+1])*2+1] y2",
"is on elements if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot compute",
"ESMP.ESMP_GridDestroy(grid) ESMP.ESMP_MeshDestroy(mesh) self.assertEqual(ok, True) if __name__ == '__main__': ESMP.ESMP_LogSet(True) print",
"20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset =",
"that will be used to initialize the name of a",
"i in range(num_node): x = nodeCoord[2*i] y = nodeCoord[2*i+1] #print",
"import cdms2 import ESMP import numpy as _NP import unittest",
"field2.size): raise NameError('compare_fields: Fields must be the same size!') #",
"'srcfield' and 'dstfield' are compared. returns True if the fileds",
"nodeCoord, elemType, elemConn = mesh_create_3x3(mesh) return mesh, nodeCoord, elemType, elemConn",
"(success) ''' # get the data pointers for the fields",
"create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact')",
"x = (x1+x2)/2.0 y = (y1+y2)/2.0 fieldPtr[i] = 20.0+x+y #fieldPtr[i]",
"# this is for mesh to grid # create ESMP_Field",
"fieldPtr[p] = 20.0+xc+yc #fieldPtr[p] = 20.0+xc*yc+yc**2 #print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p])",
"on elements if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot compute a",
"create ESMP_Field objects on the Meshes srcfield = create_ESMPfieldgrid(grid, 'srcfield')",
"is a string that will be used to initialize the",
"POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and returned as",
"5,6,10,9, 6,7,11,10, 8,9,13,12, 9,10,14,13, 10,11,15,14], dtype=_NP.int32) ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner) ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn) #print 'Mesh",
"NameError(\"Cannot compute a non-constant analytic field for a mesh\\ with",
"dstfield) # compare results and output PASS or FAIL ok",
"been created. 3x3 Mesh 3.0 2.0 13 -------14 --------15--------16 |",
"with triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2] y1",
"for i1 in range(exLB_corner[1], exUB_corner[1]): for i0 in range(exLB_corner[0], exUB_corner[0]):",
"initialize the name of a new ESMP_Field. POSTCONDITIONS: An ESMP_Field",
"are the same size if (field1.size != field2.size): raise NameError('compare_fields:",
"dstfield2 = build_analyticfieldgrid(dstfield2, grid) # run the ESMF regridding dstfield",
"dstfield = run_regridding(srcfield, dstfield) # compare results and output PASS",
"or FAIL ok = compare_fields(dstfield, dstfield2) # clean up ESMP.ESMP_FieldDestroy(srcfield)",
"dstfield2 = create_ESMPfield(mesh, 'dstfield_exact') # initialize the Fields to an",
"4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5, 0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0]) nodeOwner",
"x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print \"Grid center",
"'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact') #",
"= ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER) #print",
"is for grid to mesh nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0,",
"'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1]) #print 'upper corner bounds =",
"source to the destination Field. After the regridding is completed,",
"9 | | | | | 2.5 1.5 9 -------",
"correct = True totalErr = 0.0 for i in range(field1.size):",
"regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE, unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR) ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle) ESMP.ESMP_FieldRegridRelease(routehandle) return dstfield def compare_fields(field1,",
"# Two parametric dimensions, and three spatial dimensions mesh =",
"------- 10 --------11--------12 | | | | | 4 |",
"for grid to mesh nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0,",
"returned as 'mesh'. ''' # Two parametric dimensions, and three",
"= ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT) return field def build_analyticfield(field, nodeCoord, elemType,",
"= nodeCoord[(elemConn[offset+3])*2+1] x = (x1+x2)/2.0 y = (y1+y2)/2.0 fieldPtr[i] =",
"| 6 | | | | | 1.5 0.5 5",
"False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def setUp(self): pass def test_test1(self): # create",
"float(4) min_x = float(0) min_y = float(0) cellwidth_x = (max_x-min_x)/(ub_x-lb_x)",
"p + 1 #print 'Grid corner coordinates:' p = 0",
"1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5, 1.0,3.0, 1.5,3.0, 2.5,3.0,",
"mesh\\ with triangular elements!\") x1 = nodeCoord[(elemConn[offset])*2] x2 = nodeCoord[(elemConn[offset+1])*2]",
"{2}'.format(x,y,fieldPtr[i]) offset = offset + 4 #print \"\\n\" return field",
"| | | | | 2.5 1.5 9 ------- 10",
"'[{0},{1}]'.format(gridXCenter[p], gridYCenter[p]) p = p + 1 #print '\\n' return",
"cellwidth_x = (max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2 cellcenter_y",
"= 20.0+x+y #fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset",
"up a simple mesh num_node = 16 num_elem = 9",
"setUp(self): pass def test_test1(self): # create two unique ESMP_Mesh objects",
"{1}\\n\".format(field1ptr[i], field2ptr[i]) totalErr += err if correct: print \" -",
"| | | | 1.5 0.5 5 ------- 6 --------",
"pass def test_test1(self): # create two unique ESMP_Mesh objects grid",
"srcfield = create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield') dstfield2 =",
"(elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI): raise NameError(\"Cannot compute a non-constant analytic field",
"pointers and set the coordinates [x,y] = [0, 1] gridXCorner",
"for mesh to grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0,",
"ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2) # compare point values of field1",
"= create_ESMPfield(mesh, 'srcfield') dstfield = create_ESMPfieldgrid(grid, 'dstfield') dstfield2 = create_ESMPfieldgrid(grid,",
"| | | | 7 | 8 | 9 |",
"a string that will be used to initialize the name",
"x, ESMP.ESMP_STAGGERLOC_CORNER) gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER) #print 'lower corner",
"0 for i1 in range(exLB[1], exUB[1]): for i0 in range(exLB[0],",
"= ESMP.ESMP_FieldGetPtr(field1) field2ptr = ESMP.ESMP_FieldGetPtr(field2) # compare point values of",
"POSTCONDITIONS: An ESMP regridding operation has set the data on",
"''' # Two parametric dimensions, and three spatial dimensions mesh",
"float(0) cellwidth_x = (max_x-min_x)/(ub_x-lb_x) cellwidth_y = (max_y-min_y)/(ub_y-lb_y) cellcenter_x = cellwidth_x/2",
"bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1]) #print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1]) p",
"owned by PET 0) ''' # set up a simple",
"field for now #print \"Mesh center coordinates\" offset = 0",
"''' PRECONDITIONS: An ESMP_Field has been created. POSTCONDITIONS: The 'field'",
"dstfield): ''' PRECONDITIONS: Two ESMP_Fields have been created and a",
"= \"+str(totalErr) return False class TestESMP_GridToMeshRegridCsrv(unittest.TestCase): def setUp(self): pass def",
"= build_analyticfieldgrid(srcfield, grid) dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn) '''",
"float(i0)*cellwidth_x + cellwidth_x/2.0 gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0 p =",
"data pointers for the fields field1ptr = ESMP.ESMP_FieldGetPtr(field1) field2ptr =",
"= ESMP.ESMP_FieldGetPtr(field2) # compare point values of field1 to field2",
"exact solution over that domain. \"\"\" import cdms2 import ESMP",
"routine assumes this field is on elements if (elemType[i] ==",
"= create_ESMPfieldgrid(grid, 'srcfield') dstfield = create_ESMPfield(mesh, 'dstfield') dstfield2 = create_ESMPfield(mesh,",
"for i0 in range(exLB[0], exUB[0]): xc = gridXCoord[p] yc =",
"2 -------- 3-------- 4 0.0 0.5 1.5 2.0 1.0 1.5",
"to grid nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5,",
"[x,y] = [0, 1] gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER) gridYCorner",
"coordinates:' p = 0 for i1 in range(exLB_center[1], exUB_center[1]): for",
"field to a vanilla initial field for now #print \"Mesh",
"#fieldPtr[i] = 20.0+x*y+y**2 #print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset = offset",
"| | | 7 | 8 | 9 | |",
"#print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i]) offset = offset + 4 #print",
"y) #print '\\n' return mesh, nodeCoord, elemType, elemConn def create_ESMPmesh_3x3():",
"= {2}'.format(x,y,fieldPtr[i]) offset = offset + 4 #print \"\\n\" return",
"[0, 1] gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER) gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid,",
"err > .06: correct = False print \"ACCURACY ERROR -",
"the source to the destination Field. After the regridding is",
"range(exLB_corner[0], exUB_corner[0]): gridXCorner[p] = float(i0)*cellwidth_x gridYCorner[p] = float(i1)*cellwidth_y p =",
"_NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0, 1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5, 1.0,2.5, 1.5,2.5,",
"1.5,0.0, 2.5,0.0, 4.0,0.0, 0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5, 0.0,2.5, 1.5,2.5, 2.5,2.5,",
"has been initialized to an analytic field. ''' # get",
"--------11--------12 | | | | | 4 | 5 |",
"PRECONDITIONS: Two ESMP_Fields have been created and a comparison of",
"by PET 0) ''' # set up a simple mesh"
] |
[
"{ 'n_neighbors': n_neighbors, 'algorithm': algorithm} def get_params(self, deep:bool=False): return self._hyperparams",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams = { 'n_neighbors': n_neighbors,",
"# # Licensed under the Apache License, Version 2.0 (the",
"compliance with the License. # You may obtain a copy",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"2.0 (the \"License\"); # you may not use this file",
"agreed to in writing, software # distributed under the License",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Unless required by applicable law or agreed to in writing,",
"# for the purpose of testing how to wrap an",
"for the purpose of testing how to wrap an operator",
"return self._hyperparams def fit(self, X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams)",
"distributed under the License is distributed on an \"AS IS\"",
"algorithm='auto'): self._hyperparams = { 'n_neighbors': n_neighbors, 'algorithm': algorithm} def get_params(self,",
"scikit-learn conventions but lacks schemas, # for the purpose of",
"the specific language governing permissions and # limitations under the",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"express or implied. # See the License for the specific",
"applicable law or agreed to in writing, software # distributed",
"except in compliance with the License. # You may obtain",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"not use this file except in compliance with the License.",
"y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams) def predict(self, X): return self._wrapped_model.predict(X)",
"IBM Corporation # # Licensed under the Apache License, Version",
"schemas, # for the purpose of testing how to wrap",
"= { 'n_neighbors': n_neighbors, 'algorithm': algorithm} def get_params(self, deep:bool=False): return",
"import sklearn.neighbors # class that follows scikit-learn conventions but lacks",
"writing, software # distributed under the License is distributed on",
"in writing, software # distributed under the License is distributed",
"you may not use this file except in compliance with",
"X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams) def predict(self, X): return",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"language governing permissions and # limitations under the License. import",
"of testing how to wrap an operator without schemas class",
"how to wrap an operator without schemas class UnknownOp: def",
"use this file except in compliance with the License. #",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"# limitations under the License. import sklearn.neighbors # class that",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"or implied. # See the License for the specific language",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License. # You may obtain a copy of the License",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"License, Version 2.0 (the \"License\"); # you may not use",
"# You may obtain a copy of the License at",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"purpose of testing how to wrap an operator without schemas",
"that follows scikit-learn conventions but lacks schemas, # for the",
"'n_neighbors': n_neighbors, 'algorithm': algorithm} def get_params(self, deep:bool=False): return self._hyperparams def",
"the License. import sklearn.neighbors # class that follows scikit-learn conventions",
"deep:bool=False): return self._hyperparams def fit(self, X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier(",
"under the License is distributed on an \"AS IS\" BASIS,",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"n_neighbors, 'algorithm': algorithm} def get_params(self, deep:bool=False): return self._hyperparams def fit(self,",
"License for the specific language governing permissions and # limitations",
"Copyright 2019 IBM Corporation # # Licensed under the Apache",
"# Copyright 2019 IBM Corporation # # Licensed under the",
"self._hyperparams = { 'n_neighbors': n_neighbors, 'algorithm': algorithm} def get_params(self, deep:bool=False):",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"# class that follows scikit-learn conventions but lacks schemas, #",
"def fit(self, X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams) def predict(self,",
"the License for the specific language governing permissions and #",
"(the \"License\"); # you may not use this file except",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"# you may not use this file except in compliance",
"either express or implied. # See the License for the",
"without schemas class UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams =",
"fit(self, X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams) def predict(self, X):",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"the License is distributed on an \"AS IS\" BASIS, #",
"under the License. import sklearn.neighbors # class that follows scikit-learn",
"in compliance with the License. # You may obtain a",
"algorithm} def get_params(self, deep:bool=False): return self._hyperparams def fit(self, X, y):",
"self._hyperparams def fit(self, X, y): self._wrapped_model = sklearn.neighbors.KNeighborsClassifier( **self._hyperparams) def",
"software # distributed under the License is distributed on an",
"but lacks schemas, # for the purpose of testing how",
"to wrap an operator without schemas class UnknownOp: def __init__(self,",
"# # Unless required by applicable law or agreed to",
"def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams = { 'n_neighbors': n_neighbors, 'algorithm':",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"the purpose of testing how to wrap an operator without",
"Version 2.0 (the \"License\"); # you may not use this",
"wrap an operator without schemas class UnknownOp: def __init__(self, n_neighbors=5,",
"class that follows scikit-learn conventions but lacks schemas, # for",
"law or agreed to in writing, software # distributed under",
"conventions but lacks schemas, # for the purpose of testing",
"limitations under the License. import sklearn.neighbors # class that follows",
"sklearn.neighbors # class that follows scikit-learn conventions but lacks schemas,",
"governing permissions and # limitations under the License. import sklearn.neighbors",
"get_params(self, deep:bool=False): return self._hyperparams def fit(self, X, y): self._wrapped_model =",
"implied. # See the License for the specific language governing",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"\"License\"); # you may not use this file except in",
"__init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams = { 'n_neighbors': n_neighbors, 'algorithm': algorithm}",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"class UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams = { 'n_neighbors':",
"lacks schemas, # for the purpose of testing how to",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"follows scikit-learn conventions but lacks schemas, # for the purpose",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"an operator without schemas class UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'):",
"operator without schemas class UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"to in writing, software # distributed under the License is",
"2019 IBM Corporation # # Licensed under the Apache License,",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"# See the License for the specific language governing permissions",
"'algorithm': algorithm} def get_params(self, deep:bool=False): return self._hyperparams def fit(self, X,",
"and # limitations under the License. import sklearn.neighbors # class",
"You may obtain a copy of the License at #",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"schemas class UnknownOp: def __init__(self, n_neighbors=5, algorithm='auto'): self._hyperparams = {",
"required by applicable law or agreed to in writing, software",
"Corporation # # Licensed under the Apache License, Version 2.0",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"def get_params(self, deep:bool=False): return self._hyperparams def fit(self, X, y): self._wrapped_model",
"with the License. # You may obtain a copy of",
"this file except in compliance with the License. # You",
"permissions and # limitations under the License. import sklearn.neighbors #",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"testing how to wrap an operator without schemas class UnknownOp:",
"n_neighbors=5, algorithm='auto'): self._hyperparams = { 'n_neighbors': n_neighbors, 'algorithm': algorithm} def",
"License. import sklearn.neighbors # class that follows scikit-learn conventions but"
] |
[
"Care for a special case where the first char is",
"common unicode symbols with raw character variants s = re.sub(r'\\\\u2026',",
"for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text'",
"with open(filename, \"r\", encoding=\"utf-8\") as input: with open(output_name, \"w\", encoding=\"utf-8\")",
"re.sub(r'\\\\u2018', \"'\", s) s = re.sub(r\"&\", r\"&\", s) s =",
"def beautify(name): ''' Loading, filtering and saving the JSON tweet",
"= [x['full_text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump'",
"Replaces common symbols and kills quotation marks/apostrophes. :type: s: String",
"re.sub(r'\"', r\"\", s) # Collapse multiples of certain chars s",
"in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") # json.dump(document, output, ensure_ascii=False,",
"to {output_name}\") def deep_clean(s): ''' Deep cleaning of filtered tweets.",
"= re.sub(r'\"', r\"\", s) # Collapse multiples of certain chars",
"to a newly generated .txt file :type: name: String :rtype:",
"'', s) s = re.sub(r'\\\\u2019', \"'\", s) s = re.sub(r'\\\\u2018',",
"of certain chars s = re.sub('([.-])+', r'\\1', s) # Pad",
"re.sub(r\"”\", r\"\", s) s = re.sub('[()]', r'', s) s =",
"enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") # json.dump(document, output, ensure_ascii=False, indent=4)",
"s = re.sub(r'\"', r\"\", s) # Collapse multiples of certain",
"s) # Delete emoji modifying characters s = re.sub(chr(127996), '',",
"in x] # >> Version ii): for self-scraped tweets via",
"re.sub(chr(65039), '', s) # Kill apostrophes & punctuation because they",
"Replace some common unicode symbols with raw character variants s",
"'.join(s.split()).lower() # Define emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons",
"transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" #",
"output: .txt ''' filename = name + '.json' output_name =",
"re.sub(r'\\\\u2019', \"'\", s) s = re.sub(r'\\\\u2018', \"'\", s) s =",
"== 'realDonaldTrump' and 'full_text' in x] # >> Version ii):",
"retweet if s[:2] == 'RT': return None # Delete all",
"'.json' output_name = name + \"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\")",
"s) # Remove extra whitespace (incl. newlines) s = '",
"multiples of certain chars s = re.sub('([.-])+', r'\\1', s) #",
"filtered tweets. Replaces common symbols and kills quotation marks/apostrophes. :type:",
"re.sub(r'…', '', s) s = re.sub(r'\\\\u2019', \"'\", s) s =",
"[x['full_text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and",
"\".\" # return s[1:] if s[0] == \".\" else s",
"chars s = re.sub('([.-])+', r'\\1', s) # Pad sentence punctuation",
"[x['text'] for x in document] # Clean and only include",
"in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x]",
"s = re.sub(chr(65039), '', s) # Kill apostrophes & punctuation",
"u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'', s)",
"iii): Data set from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for x",
"encoding for _, value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\")",
"\"'\", s) s = re.sub(r'\\\\u2018', \"'\", s) s = re.sub(r\"&\",",
"for interesting tweets. s = re.sub(r'http[\\S]*', '', s) # Replace",
"s = emoji_pattern.sub(r'', s) # Care for a special case",
"s = re.sub(r\"“\", r\"\", s) s = re.sub(r\"”\", r\"\", s)",
"''' Deep cleaning of filtered tweets. Replaces common symbols and",
"is not None] # Preventing unicode characters by ensuring false",
"cleaned {filename} and saved it to {output_name}\") def deep_clean(s): '''",
"import json import re import sys def beautify(name): ''' Loading,",
"u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" # transport & map",
"some common unicode symbols with raw character variants s =",
"it to {output_name}\") def deep_clean(s): ''' Deep cleaning of filtered",
"by ensuring false ascii encoding for _, value in enumerate(document):",
"indent=4) print(f\">> Sucessfully cleaned {filename} and saved it to {output_name}\")",
"as output: document = json.load(input) # Filter only the messages",
"Filter only the messages that are not retweeted # >>",
"saved it to {output_name}\") def deep_clean(s): ''' Deep cleaning of",
"\"master_XXXX.json\" # document = [x['full_text'] for x in document if",
"beautify(name): ''' Loading, filtering and saving the JSON tweet file",
"unicode symbols with raw character variants s = re.sub(r'\\\\u2026', '...',",
"characters s = re.sub(chr(127996), '', s) s = re.sub(chr(65039), '',",
"kills quotation marks/apostrophes. :type: s: String :rtype: s: String '''",
"None # Delete all URLs because they don't make for",
"certain chars s = re.sub('([.-])+', r'\\1', s) # Pad sentence",
"\\3', s) # Remove extra whitespace (incl. newlines) s =",
"s: String :rtype: s: String ''' # Return None if",
"of filtered tweets. Replaces common symbols and kills quotation marks/apostrophes.",
"with raw character variants s = re.sub(r'\\\\u2026', '...', s) s",
"s) # Replace some common unicode symbols with raw character",
"== \".\" else s return None if __name__ == \"__main__\":",
"with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s) #",
"# Filter only the messages that are not retweeted #",
"# Remove extra whitespace (incl. newlines) s = ' '.join(s.split()).lower()",
"tweet is a retweet if s[:2] == 'RT': return None",
"document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x] #",
"s = re.sub(r'\\\\n', r\"\", s) # Delete emoji modifying characters",
"Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping # document =",
"punctuation chars with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3',",
"and saved it to {output_name}\") def deep_clean(s): ''' Deep cleaning",
"things. s = re.sub(r\"'\", r\"\", s) s = re.sub(r\"“\", r\"\",",
"return s[1:] if s[0] == \".\" else s if len(s):",
"only the messages that are not retweeted # >> Version",
"re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs u\"\\U0001F680-\\U0001F6FF\"",
"emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols",
"document if deep_clean(x) is not None] # Preventing unicode characters",
"if s[0] == \".\" else s if len(s): return s[1:]",
"output_name = name + \"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\") as",
"s = re.sub('([.-])+', r'\\1', s) # Pad sentence punctuation chars",
"because they confuse things. s = re.sub(r\"'\", r\"\", s) s",
"x in document] # Clean and only include not retweeted",
"emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols &",
"s) s = re.sub(r'…', '', s) s = re.sub(r'\\\\u2019', \"'\",",
"s) s = re.sub(r\"”\", r\"\", s) s = re.sub('[()]', r'',",
"re.sub('([.-])+', r'\\1', s) # Pad sentence punctuation chars with whitespace",
"document = [x['text'] for x in document] # Clean and",
"given tweet is a retweet if s[:2] == 'RT': return",
"'...', s) s = re.sub(r'…', '', s) s = re.sub(r'\\\\u2019',",
"Remove extra whitespace (incl. newlines) s = ' '.join(s.split()).lower() #",
"case where the first char is a \".\" # return",
"s[1:] if s[0] == \".\" else s return None if",
"newlines) s = ' '.join(s.split()).lower() # Define emoji_pattern emoji_pattern =",
"# Pad sentence punctuation chars with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])',",
"# document = [x['text'] for x in document if x['user']['screen_name']",
"u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'', s) # Care",
"= re.sub(r\"'\", r\"\", s) s = re.sub(r\"“\", r\"\", s) s",
"is a \".\" # return s[1:] if s[0] == \".\"",
"variants s = re.sub(r'\\\\u2026', '...', s) s = re.sub(r'…', '',",
"re.sub(r\"“\", r\"\", s) s = re.sub(r\"”\", r\"\", s) s =",
"''' filename = name + '.json' output_name = name +",
"Deep cleaning of filtered tweets. Replaces common symbols and kills",
"''' Loading, filtering and saving the JSON tweet file to",
"(incl. newlines) s = ' '.join(s.split()).lower() # Define emoji_pattern emoji_pattern",
"\\2 \\3', s) # Remove extra whitespace (incl. newlines) s",
"input: with open(output_name, \"w\", encoding=\"utf-8\") as output: document = json.load(input)",
"s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s) # Remove extra",
":type: s: String :rtype: s: String ''' # Return None",
"don't make for interesting tweets. s = re.sub(r'http[\\S]*', '', s)",
"with open(output_name, \"w\", encoding=\"utf-8\") as output: document = json.load(input) #",
"s: String ''' # Return None if given tweet is",
"u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\",",
"else s return None if __name__ == \"__main__\": if len(sys.argv)",
"s = ' '.join(s.split()).lower() # Define emoji_pattern emoji_pattern = re.compile(\"[\"",
"def deep_clean(s): ''' Deep cleaning of filtered tweets. Replaces common",
"for x in document if deep_clean(x) is not None] #",
"and 'full_text' in x] # >> Version ii): for self-scraped",
"s = re.sub('[()]', r'', s) s = re.sub(r'\"', r\"\", s)",
"archive \"master_XXXX.json\" # document = [x['full_text'] for x in document",
"r\"&\", s) s = re.sub(r'\\\\n', r\"\", s) # Delete emoji",
"json.dump(document, output, ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned {filename} and saved",
"char is a \".\" # return s[1:] if s[0] ==",
"if s[0] == \".\" else s return None if __name__",
"'full_text' in x] # >> Version ii): for self-scraped tweets",
"# emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" # transport",
"return None if __name__ == \"__main__\": if len(sys.argv) - 1:",
"sentence punctuation chars with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2",
"if s[:2] == 'RT': return None # Delete all URLs",
"ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping # document = [x['text']",
"ensure_ascii=False) + \"\\n\") # json.dump(document, output, ensure_ascii=False, indent=4) print(f\">> Sucessfully",
"https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for x in document] # Clean",
"modifying characters s = re.sub(chr(127996), '', s) s = re.sub(chr(65039),",
"# transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\"",
"# Replace some common unicode symbols with raw character variants",
"for _, value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") #",
"Preventing unicode characters by ensuring false ascii encoding for _,",
"output: document = json.load(input) # Filter only the messages that",
"r\"\", s) # Delete emoji modifying characters s = re.sub(chr(127996),",
"s) s = re.sub(chr(65039), '', s) # Kill apostrophes &",
"String :rtype: output: .txt ''' filename = name + '.json'",
"return None # Delete all URLs because they don't make",
"in document if deep_clean(x) is not None] # Preventing unicode",
"& pictographs u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\" #",
"u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s",
"confuse things. s = re.sub(r\"'\", r\"\", s) s = re.sub(r\"“\",",
"json.load(input) # Filter only the messages that are not retweeted",
"= re.sub(chr(65039), '', s) # Kill apostrophes & punctuation because",
"\"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'', s) # Care for a",
"'realDonaldTrump' and 'full_text' in x] # >> Version ii): for",
"in document] # Clean and only include not retweeted messages",
"Return None if given tweet is a retweet if s[:2]",
"ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned {filename} and saved it to",
"output, ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned {filename} and saved it",
"Collapse multiples of certain chars s = re.sub('([.-])+', r'\\1', s)",
"because they don't make for interesting tweets. s = re.sub(r'http[\\S]*',",
".txt ''' filename = name + '.json' output_name = name",
"# document = [x['full_text'] for x in document if x['user']['screen_name']",
"and kills quotation marks/apostrophes. :type: s: String :rtype: s: String",
"s = re.sub(r'…', '', s) s = re.sub(r'\\\\u2019', \"'\", s)",
"open(filename, \"r\", encoding=\"utf-8\") as input: with open(output_name, \"w\", encoding=\"utf-8\") as",
"for a special case where the first char is a",
"Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for",
"import sys def beautify(name): ''' Loading, filtering and saving the",
"r'\\1 \\2 \\3', s) # Remove extra whitespace (incl. newlines)",
"s) s = re.sub(r'\\\\u2018', \"'\", s) s = re.sub(r\"&\", r\"&\",",
"len(s): return s[1:] if s[0] == \".\" else s return",
"s[1:] if s[0] == \".\" else s if len(s): return",
"not retweeted messages document = [deep_clean(x) for x in document",
"# Define emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\"",
"tweets. Replaces common symbols and kills quotation marks/apostrophes. :type: s:",
"\"'\", s) s = re.sub(r\"&\", r\"&\", s) s = re.sub(r'\\\\n',",
"Loading, filtering and saving the JSON tweet file to a",
"if deep_clean(x) is not None] # Preventing unicode characters by",
"x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x] # >> Version",
"a \".\" # return s[1:] if s[0] == \".\" else",
"== 'realDonaldTrump' and 'text' in x] # >> Version iii):",
"r\"\", s) # Collapse multiples of certain chars s =",
"{output_name}\") def deep_clean(s): ''' Deep cleaning of filtered tweets. Replaces",
"= json.load(input) # Filter only the messages that are not",
"# Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\"",
"r'\\1', s) # Pad sentence punctuation chars with whitespace s",
"s) s = re.sub(r'\\\\n', r\"\", s) # Delete emoji modifying",
"\"r\", encoding=\"utf-8\") as input: with open(output_name, \"w\", encoding=\"utf-8\") as output:",
"make for interesting tweets. s = re.sub(r'http[\\S]*', '', s) #",
"s = re.sub(r'\\\\u2026', '...', s) s = re.sub(r'…', '', s)",
"they don't make for interesting tweets. s = re.sub(r'http[\\S]*', '',",
"s if len(s): return s[1:] if s[0] == \".\" else",
"first char is a \".\" # return s[1:] if s[0]",
"output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") # json.dump(document, output, ensure_ascii=False, indent=4) print(f\">>",
"# symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols",
"encoding=\"utf-8\") as output: document = json.load(input) # Filter only the",
":rtype: output: .txt ''' filename = name + '.json' output_name",
"= re.sub(r'http[\\S]*', '', s) # Replace some common unicode symbols",
"u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'', s) # Care for",
"# >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/ document =",
"r\"\", s) s = re.sub('[()]', r'', s) s = re.sub(r'\"',",
"flags=re.UNICODE) s = emoji_pattern.sub(r'', s) # Care for a special",
"Kill apostrophes & punctuation because they confuse things. s =",
"Pad sentence punctuation chars with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1",
"saving the JSON tweet file to a newly generated .txt",
"'text' in x] # >> Version iii): Data set from",
">> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text']",
"for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text'",
"= re.sub(r\"&\", r\"&\", s) s = re.sub(r'\\\\n', r\"\", s) #",
"deep_clean(x) is not None] # Preventing unicode characters by ensuring",
"symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\"",
"\".\" else s if len(s): return s[1:] if s[0] ==",
"Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\"",
"extra whitespace (incl. newlines) s = ' '.join(s.split()).lower() # Define",
"newly generated .txt file :type: name: String :rtype: output: .txt",
"\".\" else s return None if __name__ == \"__main__\": if",
"# flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags",
"a retweet if s[:2] == 'RT': return None # Delete",
":rtype: s: String ''' # Return None if given tweet",
"they confuse things. s = re.sub(r\"'\", r\"\", s) s =",
"# Preventing unicode characters by ensuring false ascii encoding for",
"Sucessfully cleaned {filename} and saved it to {output_name}\") def deep_clean(s):",
"== 'RT': return None # Delete all URLs because they",
"interesting tweets. s = re.sub(r'http[\\S]*', '', s) # Replace some",
"via https://github.com/bpb27/twitter_scraping # document = [x['text'] for x in document",
"re.sub(chr(127996), '', s) s = re.sub(chr(65039), '', s) # Kill",
"in x] # >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/",
"value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") # json.dump(document, output,",
"print(f\">> Sucessfully cleaned {filename} and saved it to {output_name}\") def",
"quotation marks/apostrophes. :type: s: String :rtype: s: String ''' #",
"Define emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" #",
"# return s[1:] if s[0] == \".\" else s if",
"= ' '.join(s.split()).lower() # Define emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\"",
"# >> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping #",
"s = re.sub(r\"”\", r\"\", s) s = re.sub('[()]', r'', s)",
"= re.sub('([.-])+', r'\\1', s) # Pad sentence punctuation chars with",
"# >> Version i): for tweets from archive \"master_XXXX.json\" #",
"file to a newly generated .txt file :type: name: String",
"include not retweeted messages document = [deep_clean(x) for x in",
"''' # Return None if given tweet is a retweet",
"== \".\" else s if len(s): return s[1:] if s[0]",
"Data set from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for x in",
"{filename} and saved it to {output_name}\") def deep_clean(s): ''' Deep",
"x] # >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/ document",
"from archive \"master_XXXX.json\" # document = [x['full_text'] for x in",
"x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x] # >> Version",
"= re.sub(r'\\\\n', r\"\", s) # Delete emoji modifying characters s",
"tweets via https://github.com/bpb27/twitter_scraping # document = [x['text'] for x in",
"= re.sub(chr(127996), '', s) s = re.sub(chr(65039), '', s) #",
"= re.sub(r'\\\\u2018', \"'\", s) s = re.sub(r\"&\", r\"&\", s) s",
"= re.sub(r'…', '', s) s = re.sub(r'\\\\u2019', \"'\", s) s",
"json import re import sys def beautify(name): ''' Loading, filtering",
"\"\\n\") # json.dump(document, output, ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned {filename}",
"s) # Kill apostrophes & punctuation because they confuse things.",
"generated .txt file :type: name: String :rtype: output: .txt '''",
"if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x] # >>",
"only include not retweeted messages document = [deep_clean(x) for x",
"special case where the first char is a \".\" #",
"& punctuation because they confuse things. s = re.sub(r\"'\", r\"\",",
"s = re.sub(r'\\\\u2019', \"'\", s) s = re.sub(r'\\\\u2018', \"'\", s)",
"String :rtype: s: String ''' # Return None if given",
"open(output_name, \"w\", encoding=\"utf-8\") as output: document = json.load(input) # Filter",
"sys def beautify(name): ''' Loading, filtering and saving the JSON",
"+ \"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\") as input: with open(output_name,",
"= re.sub(r\"“\", r\"\", s) s = re.sub(r\"”\", r\"\", s) s",
"characters by ensuring false ascii encoding for _, value in",
"r'', s) s = re.sub(r'\"', r\"\", s) # Collapse multiples",
"u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE)",
"are not retweeted # >> Version i): for tweets from",
"deep_clean(s): ''' Deep cleaning of filtered tweets. Replaces common symbols",
"'realDonaldTrump' and 'text' in x] # >> Version iii): Data",
"r\"\", s) s = re.sub(r\"“\", r\"\", s) s = re.sub(r\"”\",",
"u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'',",
"s = re.sub(r'\\\\u2018', \"'\", s) s = re.sub(r\"&\", r\"&\", s)",
"document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x] #",
"<gh_stars>0 import json import re import sys def beautify(name): '''",
"s) s = re.sub(r'\\\\u2019', \"'\", s) s = re.sub(r'\\\\u2018', \"'\",",
"for tweets from archive \"master_XXXX.json\" # document = [x['full_text'] for",
"not retweeted # >> Version i): for tweets from archive",
"None] # Preventing unicode characters by ensuring false ascii encoding",
"re.sub(r\"&\", r\"&\", s) s = re.sub(r'\\\\n', r\"\", s) # Delete",
"chars with whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s)",
"u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s = emoji_pattern.sub(r'', s) #",
"\"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\") as input: with open(output_name, \"w\",",
"x] # >> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping",
"a special case where the first char is a \".\"",
"\"w\", encoding=\"utf-8\") as output: document = json.load(input) # Filter only",
"None if given tweet is a retweet if s[:2] ==",
"i): for tweets from archive \"master_XXXX.json\" # document = [x['full_text']",
"name + \"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\") as input: with",
"if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x] # >>",
":type: name: String :rtype: output: .txt ''' filename = name",
"in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x]",
"if given tweet is a retweet if s[:2] == 'RT':",
"'', s) # Kill apostrophes & punctuation because they confuse",
"re.sub('[()]', r'', s) s = re.sub(r'\"', r\"\", s) # Collapse",
"= re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s) # Remove extra whitespace",
"s) s = re.sub('[()]', r'', s) s = re.sub(r'\"', r\"\",",
"s) # Care for a special case where the first",
"the messages that are not retweeted # >> Version i):",
"= [x['text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump'",
"_, value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) + \"\\n\") # json.dump(document,",
"[x['text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and",
"Clean and only include not retweeted messages document = [deep_clean(x)",
"r\"\", s) s = re.sub(r\"”\", r\"\", s) s = re.sub('[()]',",
">> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping # document",
"apostrophes & punctuation because they confuse things. s = re.sub(r\"'\",",
"URLs because they don't make for interesting tweets. s =",
"for self-scraped tweets via https://github.com/bpb27/twitter_scraping # document = [x['text'] for",
"s[0] == \".\" else s if len(s): return s[1:] if",
"u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\"",
"u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\" #",
"set from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for x in document]",
"document = json.load(input) # Filter only the messages that are",
"retweeted messages document = [deep_clean(x) for x in document if",
"character variants s = re.sub(r'\\\\u2026', '...', s) s = re.sub(r'…',",
"+ \"\\n\") # json.dump(document, output, ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned",
"marks/apostrophes. :type: s: String :rtype: s: String ''' # Return",
"document = [deep_clean(x) for x in document if deep_clean(x) is",
"# Delete all URLs because they don't make for interesting",
"file :type: name: String :rtype: output: .txt ''' filename =",
"s[:2] == 'RT': return None # Delete all URLs because",
"the JSON tweet file to a newly generated .txt file",
"else s if len(s): return s[1:] if s[0] == \".\"",
"cleaning of filtered tweets. Replaces common symbols and kills quotation",
"'RT': return None # Delete all URLs because they don't",
"tweets. s = re.sub(r'http[\\S]*', '', s) # Replace some common",
"map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag",
"document = [x['full_text'] for x in document if x['user']['screen_name'] ==",
"& map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau",
"and only include not retweeted messages document = [deep_clean(x) for",
"String ''' # Return None if given tweet is a",
"from https://github.com/MatthewWolff/MarkovTweets/ document = [x['text'] for x in document] #",
"for x in document] # Clean and only include not",
"flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\"",
"a newly generated .txt file :type: name: String :rtype: output:",
"'', s) # Replace some common unicode symbols with raw",
"JSON tweet file to a newly generated .txt file :type:",
"' '.join(s.split()).lower() # Define emoji_pattern emoji_pattern = re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" #",
"flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\"",
"emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" # transport &",
"whitespace s = re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s) # Remove",
"encoding=\"utf-8\") as input: with open(output_name, \"w\", encoding=\"utf-8\") as output: document",
"tweet file to a newly generated .txt file :type: name:",
"# Collapse multiples of certain chars s = re.sub('([.-])+', r'\\1',",
"the first char is a \".\" # return s[1:] if",
"symbols with raw character variants s = re.sub(r'\\\\u2026', '...', s)",
"document] # Clean and only include not retweeted messages document",
"= [x['text'] for x in document] # Clean and only",
"s) # Pad sentence punctuation chars with whitespace s =",
"where the first char is a \".\" # return s[1:]",
"emoji_pattern.sub(r'', s) # Care for a special case where the",
"name + '.json' output_name = name + \"_filtered.txt\" with open(filename,",
"re.sub(r'http[\\S]*', '', s) # Replace some common unicode symbols with",
"not None] # Preventing unicode characters by ensuring false ascii",
"= re.compile(\"[\" u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs",
"self-scraped tweets via https://github.com/bpb27/twitter_scraping # document = [x['text'] for x",
"u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\" u\"\\u2640-\\u2642\" \"]+\", flags=re.UNICODE) s =",
"# Clean and only include not retweeted messages document =",
"u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\"",
"(iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\"",
">> Version i): for tweets from archive \"master_XXXX.json\" # document",
"u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)",
"flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\" # flags u\"\\U0001F600-\\U0001F64F\"",
"re.sub('([^0-9])([.,!?])([^0-9])', r'\\1 \\2 \\3', s) # Remove extra whitespace (incl.",
"symbols and kills quotation marks/apostrophes. :type: s: String :rtype: s:",
"re.sub(r\"'\", r\"\", s) s = re.sub(r\"“\", r\"\", s) s =",
"+ '.json' output_name = name + \"_filtered.txt\" with open(filename, \"r\",",
"s = re.sub(r\"&\", r\"&\", s) s = re.sub(r'\\\\n', r\"\", s)",
"Version i): for tweets from archive \"master_XXXX.json\" # document =",
"u\"\\U0001F600-\\U0001F64F\" # emoticons u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs u\"\\U0001F680-\\U0001F6FF\" #",
"s = re.sub(r'http[\\S]*', '', s) # Replace some common unicode",
"x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in",
"ensuring false ascii encoding for _, value in enumerate(document): output.write(json.dumps(value,",
"Delete emoji modifying characters s = re.sub(chr(127996), '', s) s",
"unicode characters by ensuring false ascii encoding for _, value",
"all URLs because they don't make for interesting tweets. s",
"= name + '.json' output_name = name + \"_filtered.txt\" with",
"None if __name__ == \"__main__\": if len(sys.argv) - 1: beautify(sys.argv[1])",
"s) # Collapse multiples of certain chars s = re.sub('([.-])+',",
"symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS) u\"\\U0001F1F2-\\U0001F1F4\" # Macau flag u\"\\U0001F1E6-\\U0001F1FF\"",
"punctuation because they confuse things. s = re.sub(r\"'\", r\"\", s)",
"ascii encoding for _, value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False) +",
"if len(s): return s[1:] if s[0] == \".\" else s",
"s return None if __name__ == \"__main__\": if len(sys.argv) -",
"re.sub(r'\\\\n', r\"\", s) # Delete emoji modifying characters s =",
"false ascii encoding for _, value in enumerate(document): output.write(json.dumps(value, ensure_ascii=False)",
"s) s = re.sub(r\"&\", r\"&\", s) s = re.sub(r'\\\\n', r\"\",",
"https://github.com/bpb27/twitter_scraping # document = [x['text'] for x in document if",
".txt file :type: name: String :rtype: output: .txt ''' filename",
"return s[1:] if s[0] == \".\" else s return None",
"# Care for a special case where the first char",
"= name + \"_filtered.txt\" with open(filename, \"r\", encoding=\"utf-8\") as input:",
"filename = name + '.json' output_name = name + \"_filtered.txt\"",
"common symbols and kills quotation marks/apostrophes. :type: s: String :rtype:",
"as input: with open(output_name, \"w\", encoding=\"utf-8\") as output: document =",
"# Return None if given tweet is a retweet if",
"filtering and saving the JSON tweet file to a newly",
"s = re.sub(chr(127996), '', s) s = re.sub(chr(65039), '', s)",
"'', s) s = re.sub(chr(65039), '', s) # Kill apostrophes",
"# Kill apostrophes & punctuation because they confuse things. s",
"emoji modifying characters s = re.sub(chr(127996), '', s) s =",
"pictographs u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols u\"\\U0001F1E0-\\U0001F1FF\" # flags",
"document = [x['text'] for x in document if x['user']['screen_name'] ==",
"x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in",
"re.sub(r'\\\\u2026', '...', s) s = re.sub(r'…', '', s) s =",
"= re.sub(r'\\\\u2026', '...', s) s = re.sub(r'…', '', s) s",
"whitespace (incl. newlines) s = ' '.join(s.split()).lower() # Define emoji_pattern",
"s = re.sub(r\"'\", r\"\", s) s = re.sub(r\"“\", r\"\", s)",
"s) s = re.sub(r'\"', r\"\", s) # Collapse multiples of",
"# json.dump(document, output, ensure_ascii=False, indent=4) print(f\">> Sucessfully cleaned {filename} and",
"= re.sub(r'\\\\u2019', \"'\", s) s = re.sub(r'\\\\u2018', \"'\", s) s",
"[deep_clean(x) for x in document if deep_clean(x) is not None]",
"= re.sub('[()]', r'', s) s = re.sub(r'\"', r\"\", s) #",
"# flags u\"\\U0001F600-\\U0001F64F\" u\"\\U00002702-\\U000027B0\" u\"\\U000024C2-\\U0001F251\" u\"\\U0001f926-\\U0001f937\" u\"\\U0001F1F2\" u\"\\U0001F1F4\" u\"\\U0001F620\" u\"\\u200d\"",
"name: String :rtype: output: .txt ''' filename = name +",
"raw character variants s = re.sub(r'\\\\u2026', '...', s) s =",
"= emoji_pattern.sub(r'', s) # Care for a special case where",
"and saving the JSON tweet file to a newly generated",
"tweets from archive \"master_XXXX.json\" # document = [x['full_text'] for x",
"# Delete emoji modifying characters s = re.sub(chr(127996), '', s)",
"= re.sub(r\"”\", r\"\", s) s = re.sub('[()]', r'', s) s",
"is a retweet if s[:2] == 'RT': return None #",
"messages document = [deep_clean(x) for x in document if deep_clean(x)",
"and 'text' in x] # >> Version iii): Data set",
"import re import sys def beautify(name): ''' Loading, filtering and",
"re import sys def beautify(name): ''' Loading, filtering and saving",
"that are not retweeted # >> Version i): for tweets",
"retweeted # >> Version i): for tweets from archive \"master_XXXX.json\"",
"x in document if deep_clean(x) is not None] # Preventing",
"messages that are not retweeted # >> Version i): for",
"Delete all URLs because they don't make for interesting tweets.",
"= [deep_clean(x) for x in document if deep_clean(x) is not",
"s) s = re.sub(r\"“\", r\"\", s) s = re.sub(r\"”\", r\"\",",
"s[0] == \".\" else s return None if __name__ =="
] |
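The rows above are overlapping ten-token windows over a single Python script: a beautify loader that filters a JSON tweet archive into a .txt file, plus a deep_clean regex pass. Stitching the overlaps back together gives the best-effort reconstruction below. Hedges: line breaks and indentation are inferred from token order; the import json line is an assumption (the windows only show "import re import sys", but json.load/json.dumps are called); the re.sub on "&" is assumed to have originally rewritten the HTML entity for ampersand to a bare "&" before the entity was unescaped during extraction; and the tail of the final print message is not recoverable, so it is left elided.

import json  # assumed: not visible in the fragments, but json.load/json.dumps are used
import re
import sys


def beautify(name):
    ''' Loading, filtering and saving the JSON tweet file to a newly generated .txt file
    :type: name: String
    :rtype: output: .txt
    '''
    filename = name + '.json'
    output_name = name + "_filtered.txt"
    with open(filename, "r", encoding="utf-8") as input:
        with open(output_name, "w", encoding="utf-8") as output:
            document = json.load(input)
            # Filter only the messages that are not retweeted
            # >> Version i): for tweets from archive "master_XXXX.json"
            # document = [x['full_text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x]
            # >> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping
            # document = [x['text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x]
            # >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/
            document = [x['text'] for x in document]
            # Clean and only include not retweeted messages
            document = [deep_clean(x) for x in document if deep_clean(x) is not None]
            # Preventing unicode characters by ensuring false ascii encoding
            for _, value in enumerate(document):
                output.write(json.dumps(value, ensure_ascii=False) + "\n")
            # json.dump(document, output, ensure_ascii=False, indent=4)
    print(f">> Successfully cleaned {filename} and ...")  # message tail not recoverable from the fragments


def deep_clean(s):
    ''' Deep cleaning of filtered tweets. Replaces common symbols and kills quotation marks/apostrophes.
    :type: s: String
    :rtype: s: String
    '''
    # Return None if given tweet is a retweet
    if s[:2] == 'RT':
        return None
    # Delete all URLs because they don't make for interesting tweets.
    s = re.sub(r'http[\S]*', '', s)
    # Replace some common unicode symbols with raw character variants
    s = re.sub(r'\\u2026', '...', s)
    s = re.sub(r'…', '', s)
    s = re.sub(r'\\u2019', "'", s)
    s = re.sub(r'\\u2018', "'", s)
    s = re.sub(r"&", r"&", s)  # assumption: the HTML entity was lost to unescaping in extraction
    s = re.sub(r'\\n', r"", s)
    # Delete emoji modifying characters
    s = re.sub(chr(127996), '', s)
    s = re.sub(chr(65039), '', s)
    # Kill apostrophes & punctuation because they confuse things.
    s = re.sub(r"'", r"", s)
    s = re.sub(r"“", r"", s)
    s = re.sub(r"”", r"", s)
    s = re.sub('[()]', r'', s)
    s = re.sub(r'"', r"", s)
    # Collapse multiples of certain chars
    s = re.sub('([.-])+', r'\1', s)
    # Pad sentence punctuation chars with whitespace
    s = re.sub('([^0-9])([.,!?])([^0-9])', r'\1 \2 \3', s)
    # Remove extra whitespace (incl. newlines)
    s = ' '.join(s.split()).lower()
    # Define emoji_pattern
    emoji_pattern = re.compile(
        "["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
        u"\U0001F1F2-\U0001F1F4"  # Macau flag
        u"\U0001F1E6-\U0001F1FF"  # flags
        u"\U0001F600-\U0001F64F"
        u"\U00002702-\U000027B0"
        u"\U000024C2-\U0001F251"
        u"\U0001f926-\U0001f937"
        u"\U0001F1F2" u"\U0001F1F4" u"\U0001F620" u"\u200d" u"\u2640-\u2642"
        "]+", flags=re.UNICODE)
    s = emoji_pattern.sub(r'', s)
    # Care for a special case where the first char is a "."
    # return s[1:] if s[0] == "." else s
    if len(s):
        return s[1:] if s[0] == "." else s
    return None


if __name__ == "__main__":
    if len(sys.argv) - 1:
        beautify(sys.argv[1])

The reconstruction keeps two quirks of the original rather than silently changing behaviour: the "as input"/"as output" names shadow Python builtins, and the filter comprehension calls deep_clean twice per tweet, where caching the result would halve the regex work.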
[
"\"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\" \"+ str(data2[\"cls\"][j])+\" \"+ str(polydets[i][1][j][k][8])+\"\\n\")",
"f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)): for",
"with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with open(path1,'rb')",
"str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\" \"+ str(data2[\"cls\"][j])+\" \"+ str(polydets[i][1][j][k][8])+\"\\n\") f.close()",
"polys=[] for k in range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly)",
"else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f",
"polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f =",
"as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2",
"bt import pickle import copy import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\"",
"= bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i",
"txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\") for j in range(len(polydets[0][1])): if",
"in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+",
"in range(len(obbdets)): for j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0:",
"polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile,",
"j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\"",
"f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+",
"poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for",
"k in range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[]",
"polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)): for j in range(len(obbdets[0][1])): data=obbdets[i][1][j]",
"k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\"",
"for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+",
"for j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0: polys=[] for",
"for j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])):",
"range(len(obbdets)): for j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0: polys=[]",
"range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0: polys=[] for k in range(len(data)):",
"obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)): for j",
"in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0: polys=[] for k in",
"str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\" \"+ str(data2[\"cls\"][j])+\" \"+",
"\"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\" \"+ str(data2[\"cls\"][j])+\"",
"import BboxToolkit as bt import pickle import copy import numpy",
"if polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+",
"in range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys",
"in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\") for j in",
"polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\"",
"for k in range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else:",
"= pickle.load(f) with open(path1,'rb') as f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets)",
"with open(path1,'rb') as f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for i",
"str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+",
"import pickle import copy import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"#",
"str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+",
"data2 = pickle.load(f) with open(path1,'rb') as f: obbdets = pickle.load(f)",
"= open(txtfile, \"w\") for j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for",
"np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 =",
"path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with",
"for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\") for",
"range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\"",
"path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f)",
"#/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with open(path1,'rb') as f: obbdets =",
"i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\") for j",
"data=obbdets[i][1][j] if data.size!= 0: polys=[] for k in range(len(data)): poly",
"range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\"",
"as bt import pickle import copy import numpy as np",
"import copy import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb')",
"for i in range(len(obbdets)): for j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if",
"\"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\"",
"range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+ str(polydets[i][1][j][k][1])+\"",
"open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with open(path1,'rb') as",
"if data.size!= 0: polys=[] for k in range(len(data)): poly =",
"pickle import copy import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with",
"i in range(len(obbdets)): for j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!=",
"import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f:",
"numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl",
"pickle.load(f) polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)): for j in range(len(obbdets[0][1])):",
"\"w\") for j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k in",
"as f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)):",
"in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k in range(len(polydets[i][1][j])): f.write(str(polydets[i][1][j][k][0])+\" \"+",
"str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\" \"+ str(polydets[i][1][j][k][7])+\" \"+",
"pickle.load(f) with open(path1,'rb') as f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for",
"f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with open(path1,'rb') as f: obbdets",
"f = open(txtfile, \"w\") for j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]:",
"= pickle.load(f) polydets=copy.deepcopy(obbdets) for i in range(len(obbdets)): for j in",
"savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\")",
"as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl data2 = pickle.load(f) with open(path1,'rb') as f:",
"copy import numpy as np path1=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl\" path2=\"/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl\"# with open(path2,'rb') as",
"BboxToolkit as bt import pickle import copy import numpy as",
"data.size!= 0: polys=[] for k in range(len(data)): poly = bt.obb2poly(data[k][0:5])",
"polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\"",
"open(path1,'rb') as f: obbdets = pickle.load(f) polydets=copy.deepcopy(obbdets) for i in",
"open(txtfile, \"w\") for j in range(len(polydets[0][1])): if polydets[i][1][j]!=[]: for k",
"j in range(len(obbdets[0][1])): data=obbdets[i][1][j] if data.size!= 0: polys=[] for k",
"\"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\" \"+ str(polydets[i][1][j][k][6])+\"",
"range(len(polydets)): txtfile=savepath+polydets[i][0]+\".txt\" f = open(txtfile, \"w\") for j in range(len(polydets[0][1])):",
"poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in range(len(polydets)):",
"0: polys=[] for k in range(len(data)): poly = bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5])",
"bt.obb2poly(data[k][0:5]) poly=np.append(poly,data[k][5]) polys.append(poly) else: polys=[] polydets[i][1][j]=polys savepath=\"/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/\" for i in",
"\"+ str(polydets[i][1][j][k][1])+\" \"+ str(polydets[i][1][j][k][2])+\" \"+ str(polydets[i][1][j][k][3])+\" \"+ str(polydets[i][1][j][k][4])+\" \"+ str(polydets[i][1][j][k][5])+\""
] |
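This block reassembles into a small export script: it loads pickled oriented-bounding-box detections, converts the first five values of each detection row to polygon corners with BboxToolkit.obb2poly, re-attaches the score, and writes one whitespace-separated text file per image. In the sketch below, layout is inferred; the hard-coded paths, variable names, and the data2["cls"] class lookup are taken verbatim from the fragments, and a stray "#" after the path2 assignment is dropped.

import BboxToolkit as bt
import pickle
import copy
import numpy as np

path1 = "/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2 = "/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"
with open(path2, 'rb') as f:  # /home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
    data2 = pickle.load(f)
with open(path1, 'rb') as f:
    obbdets = pickle.load(f)

polydets = copy.deepcopy(obbdets)
for i in range(len(obbdets)):
    for j in range(len(obbdets[0][1])):
        data = obbdets[i][1][j]
        if data.size != 0:
            polys = []
            for k in range(len(data)):
                # first five values describe the oriented box, the sixth is the score
                poly = bt.obb2poly(data[k][0:5])
                poly = np.append(poly, data[k][5])
                polys.append(poly)
        else:
            polys = []
        polydets[i][1][j] = polys

savepath = "/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
    txtfile = savepath + polydets[i][0] + ".txt"
    f = open(txtfile, "w")
    for j in range(len(polydets[0][1])):
        if polydets[i][1][j] != []:
            for k in range(len(polydets[i][1][j])):
                f.write(str(polydets[i][1][j][k][0]) + " " +
                        str(polydets[i][1][j][k][1]) + " " +
                        str(polydets[i][1][j][k][2]) + " " +
                        str(polydets[i][1][j][k][3]) + " " +
                        str(polydets[i][1][j][k][4]) + " " +
                        str(polydets[i][1][j][k][5]) + " " +
                        str(polydets[i][1][j][k][6]) + " " +
                        str(polydets[i][1][j][k][7]) + " " +
                        str(data2["cls"][j]) + " " +
                        str(polydets[i][1][j][k][8]) + "\n")
    f.close()

Opening the output file via with open(txtfile, "w") as f: would be the more idiomatic form, but the explicit f.close() is what the fragments show.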
[
"* y1) self.z1 = int(res * z1) @ti.kernel def init_kernel(self,",
"def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx,",
">= self.z0 and k <= self.z1: cell_type[i, j, k] =",
"if i >= self.x0 and i <= self.x1 and \\",
"y1) self.z1 = int(res * z1) @ti.kernel def init_kernel(self, cell_type",
"ti.template()): for i, j, k in cell_type: if i >=",
"self.res = res self.x0 = int(res * x0) self.y0 =",
"= utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0",
"self.z1 = int(res * z1) @ti.kernel def init_kernel(self, cell_type :",
"z1): self.res = res self.x0 = int(res * x0) self.y0",
"self.x0 = int(res * x0) self.y0 = int(res * y0)",
"utils from apic_extension import * @ti.data_oriented class Initializer3D: # tmp",
"self.x1 and \\ j >= self.y0 and j <= self.y1",
"self.z0 = int(res * z0) self.x1 = int(res * x1)",
"z1) @ti.kernel def init_kernel(self, cell_type : ti.template()): for i, j,",
"in cell_type: if i >= self.x0 and i <= self.x1",
"j >= self.y0 and j <= self.y1 and \\ k",
"\\ k >= self.z0 and k <= self.z1: cell_type[i, j,",
"def init_kernel(self, cell_type : ti.template()): for i, j, k in",
"x1, y1, z1): self.res = res self.x0 = int(res *",
"* x0) self.y0 = int(res * y0) self.z0 = int(res",
"as ti import utils from apic_extension import * @ti.data_oriented class",
"taichi as ti import utils from apic_extension import * @ti.data_oriented",
"* dx, self.z0 * dx), (self.x1 * dx, self.y1 *",
"y0, z0, x1, y1, z1): self.res = res self.x0 =",
"self.y0 = int(res * y0) self.z0 = int(res * z0)",
"self.y0 * dx, self.z0 * dx), (self.x1 * dx, self.y1",
"x1) self.y1 = int(res * y1) self.z1 = int(res *",
"self.y1 = int(res * y1) self.z1 = int(res * z1)",
"y1, z1): self.res = res self.x0 = int(res * x0)",
"for i, j, k in cell_type: if i >= self.x0",
"z0) self.x1 = int(res * x1) self.y1 = int(res *",
"ti import utils from apic_extension import * @ti.data_oriented class Initializer3D:",
"simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx), (self.x1",
"j, k in cell_type: if i >= self.x0 and i",
"k in cell_type: if i >= self.x0 and i <=",
"self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx,",
"cell_type[i, j, k] = utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx",
"* y0) self.z0 = int(res * z0) self.x1 = int(res",
"= int(res * y1) self.z1 = int(res * z1) @ti.kernel",
"* @ti.data_oriented class Initializer3D: # tmp initializer def __init__(self, res,",
"<= self.y1 and \\ k >= self.z0 and k <=",
"dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0",
"= int(res * y0) self.z0 = int(res * z0) self.x1",
"* dx, self.y0 * dx, self.z0 * dx), (self.x1 *",
"init_kernel(self, cell_type : ti.template()): for i, j, k in cell_type:",
"import * @ti.data_oriented class Initializer3D: # tmp initializer def __init__(self,",
"dx), (self.x1 * dx, self.y1 * dx, self.z1 * dx))",
"<= self.x1 and \\ j >= self.y0 and j <=",
"import utils from apic_extension import * @ti.data_oriented class Initializer3D: #",
">= self.x0 and i <= self.x1 and \\ j >=",
"# tmp initializer def __init__(self, res, x0, y0, z0, x1,",
"self.z1: cell_type[i, j, k] = utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type)",
"i >= self.x0 and i <= self.x1 and \\ j",
"dx, self.z0 * dx), (self.x1 * dx, self.y1 * dx,",
"j <= self.y1 and \\ k >= self.z0 and k",
"utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 *",
"* x1) self.y1 = int(res * y1) self.z1 = int(res",
"y0) self.z0 = int(res * z0) self.x1 = int(res *",
"self.z0 and k <= self.z1: cell_type[i, j, k] = utils.FLUID",
"= res self.x0 = int(res * x0) self.y0 = int(res",
"and \\ j >= self.y0 and j <= self.y1 and",
"= simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 *",
"k <= self.z1: cell_type[i, j, k] = utils.FLUID def init_scene(self,",
"dx, self.y0 * dx, self.z0 * dx), (self.x1 * dx,",
"= int(res * z1) @ti.kernel def init_kernel(self, cell_type : ti.template()):",
"self.x1 = int(res * x1) self.y1 = int(res * y1)",
"init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0",
"res self.x0 = int(res * x0) self.y0 = int(res *",
"k] = utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx",
"and j <= self.y1 and \\ k >= self.z0 and",
"int(res * z1) @ti.kernel def init_kernel(self, cell_type : ti.template()): for",
"res, x0, y0, z0, x1, y1, z1): self.res = res",
"import taichi as ti import utils from apic_extension import *",
"i, j, k in cell_type: if i >= self.x0 and",
"int(res * x1) self.y1 = int(res * y1) self.z1 =",
"i <= self.x1 and \\ j >= self.y0 and j",
"= int(res * z0) self.x1 = int(res * x1) self.y1",
"self.x0 and i <= self.x1 and \\ j >= self.y0",
"x0, y0, z0, x1, y1, z1): self.res = res self.x0",
"self.y0 and j <= self.y1 and \\ k >= self.z0",
"apic_extension import * @ti.data_oriented class Initializer3D: # tmp initializer def",
"and \\ k >= self.z0 and k <= self.z1: cell_type[i,",
">= self.y0 and j <= self.y1 and \\ k >=",
"\\ j >= self.y0 and j <= self.y1 and \\",
"int(res * z0) self.x1 = int(res * x1) self.y1 =",
"int(res * y1) self.z1 = int(res * z1) @ti.kernel def",
"k >= self.z0 and k <= self.z1: cell_type[i, j, k]",
"simulator): self.init_kernel(simulator.cell_type) dx = simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 *",
"tmp initializer def __init__(self, res, x0, y0, z0, x1, y1,",
"from apic_extension import * @ti.data_oriented class Initializer3D: # tmp initializer",
"self.y1 and \\ k >= self.z0 and k <= self.z1:",
"* dx), (self.x1 * dx, self.y1 * dx, self.z1 *",
"and i <= self.x1 and \\ j >= self.y0 and",
"j, k] = utils.FLUID def init_scene(self, simulator): self.init_kernel(simulator.cell_type) dx =",
"def __init__(self, res, x0, y0, z0, x1, y1, z1): self.res",
"cell_type: if i >= self.x0 and i <= self.x1 and",
"Initializer3D: # tmp initializer def __init__(self, res, x0, y0, z0,",
"x0) self.y0 = int(res * y0) self.z0 = int(res *",
"class Initializer3D: # tmp initializer def __init__(self, res, x0, y0,",
"__init__(self, res, x0, y0, z0, x1, y1, z1): self.res =",
"and k <= self.z1: cell_type[i, j, k] = utils.FLUID def",
"= int(res * x1) self.y1 = int(res * y1) self.z1",
"simulator.dx simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx),",
"self.z0 * dx), (self.x1 * dx, self.y1 * dx, self.z1",
"int(res * y0) self.z0 = int(res * z0) self.x1 =",
"cell_type : ti.template()): for i, j, k in cell_type: if",
"@ti.data_oriented class Initializer3D: # tmp initializer def __init__(self, res, x0,",
": ti.template()): for i, j, k in cell_type: if i",
"<= self.z1: cell_type[i, j, k] = utils.FLUID def init_scene(self, simulator):",
"int(res * x0) self.y0 = int(res * y0) self.z0 =",
"= int(res * x0) self.y0 = int(res * y0) self.z0",
"z0, x1, y1, z1): self.res = res self.x0 = int(res",
"initializer def __init__(self, res, x0, y0, z0, x1, y1, z1):",
"* z0) self.x1 = int(res * x1) self.y1 = int(res",
"@ti.kernel def init_kernel(self, cell_type : ti.template()): for i, j, k",
"* z1) @ti.kernel def init_kernel(self, cell_type : ti.template()): for i,"
] |
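These windows reassemble cleanly into a Taichi scene initializer that flood-fills an axis-aligned box of grid cells with utils.FLUID and seeds the simulator's level set with the same box scaled by the cell size. Only whitespace and line breaks are inferred:

import taichi as ti
import utils
from apic_extension import *


@ti.data_oriented
class Initializer3D:  # tmp initializer
    def __init__(self, res, x0, y0, z0, x1, y1, z1):
        # convert normalized [0, 1] box coordinates to grid indices
        self.res = res
        self.x0 = int(res * x0)
        self.y0 = int(res * y0)
        self.z0 = int(res * z0)
        self.x1 = int(res * x1)
        self.y1 = int(res * y1)
        self.z1 = int(res * z1)

    @ti.kernel
    def init_kernel(self, cell_type : ti.template()):
        for i, j, k in cell_type:
            if i >= self.x0 and i <= self.x1 and \
               j >= self.y0 and j <= self.y1 and \
               k >= self.z0 and k <= self.z1:
                cell_type[i, j, k] = utils.FLUID

    def init_scene(self, simulator):
        self.init_kernel(simulator.cell_type)
        dx = simulator.dx
        simulator.level_set.initialize_with_aabb(
            (self.x0 * dx, self.y0 * dx, self.z0 * dx),
            (self.x1 * dx, self.y1 * dx, self.z1 * dx))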
[
"def copy_block(iring, space): return CopyBlock(iring, space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'],",
"import TransformBlock from bifrost.ndarray import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy",
"self.orings = [self.create_ring(space=space)] def on_sequence(self, iseq): return deepcopy(iseq.header) def on_data(self,",
"= [self.create_ring(space=space)] def on_sequence(self, iseq): return deepcopy(iseq.header) def on_data(self, ispan,",
"ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return CopyBlock(iring, space) bc",
"def __init__(self, iring, space): super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)] def",
"on_sequence(self, iseq): return deepcopy(iseq.header) def on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$",
"256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8')",
"output ring\"\"\" def __init__(self, iring, space): super(CopyBlock, self).__init__(iring) self.orings =",
"bifrost as bf from bifrost.pipeline import TransformBlock from bifrost.ndarray import",
"return deepcopy(iseq.header) def on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring,",
"[self.create_ring(space=space)] def on_sequence(self, iseq): return deepcopy(iseq.header) def on_data(self, ispan, ospan):",
"as bf from bifrost.pipeline import TransformBlock from bifrost.ndarray import copy_array",
"def on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return",
"bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline",
"axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline =",
"deepcopy import bifrost as bf from bifrost.pipeline import TransformBlock from",
"bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol',",
"copy import deepcopy import bifrost as bf from bifrost.pipeline import",
"bifrost.pipeline import TransformBlock from bifrost.ndarray import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$",
"space): super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)] def on_sequence(self, iseq): return",
"bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq')",
"import deepcopy import bifrost as bf from bifrost.pipeline import TransformBlock",
"ring\"\"\" def __init__(self, iring, space): super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)]",
"return CopyBlock(iring, space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$",
"copy_block(iring, space): return CopyBlock(iring, space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)",
"on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return CopyBlock(iring,",
"'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline = bf.get_default_pipeline()# $\\tikzmark{pipeline-start}$ pipeline.shutdown_on_signals() pipeline.run()#$\\tikzmark{pipeline-end}$",
"label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc()",
"iseq): return deepcopy(iseq.header) def on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def",
"<gh_stars>0 from copy import deepcopy import bifrost as bf from",
"deepcopy(iseq.header) def on_data(self, ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space):",
"import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input ring to",
"\"\"\"Copy the input ring to output ring\"\"\" def __init__(self, iring,",
"self).__init__(iring) self.orings = [self.create_ring(space=space)] def on_sequence(self, iseq): return deepcopy(iseq.header) def",
"bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time')",
"the input ring to output ring\"\"\" def __init__(self, iring, space):",
"$\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$",
"bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline = bf.get_default_pipeline()#",
"from copy import deepcopy import bifrost as bf from bifrost.pipeline",
"gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time',",
"bf from bifrost.pipeline import TransformBlock from bifrost.ndarray import copy_array class",
"ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return CopyBlock(iring, space) bc = bf.BlockChainer()",
"copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return CopyBlock(iring, space) bc =",
"$\\tikzmark{block-start}$ \"\"\"Copy the input ring to output ring\"\"\" def __init__(self,",
"TransformBlock from bifrost.ndarray import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the",
"import bifrost as bf from bifrost.pipeline import TransformBlock from bifrost.ndarray",
"space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256,",
"from bifrost.pipeline import TransformBlock from bifrost.ndarray import copy_array class CopyBlock(TransformBlock):#",
"space): return CopyBlock(iring, space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')#",
"__init__(self, iring, space): super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)] def on_sequence(self,",
"copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input ring to output",
"class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input ring to output ring\"\"\"",
"= bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time',",
"CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input ring to output ring\"\"\" def",
"bifrost.ndarray import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input ring",
"ispan, ospan): copy_array(ospan.data, ispan.data)#$\\tikzmark{block-end}$ def copy_block(iring, space): return CopyBlock(iring, space)",
"def on_sequence(self, iseq): return deepcopy(iseq.header) def on_data(self, ispan, ospan): copy_array(ospan.data,",
"bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline = bf.get_default_pipeline()# $\\tikzmark{pipeline-start}$",
"iring, space): super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)] def on_sequence(self, iseq):",
"from bifrost.ndarray import copy_array class CopyBlock(TransformBlock):# $\\tikzmark{block-start}$ \"\"\"Copy the input",
"super(CopyBlock, self).__init__(iring) self.orings = [self.create_ring(space=space)] def on_sequence(self, iseq): return deepcopy(iseq.header)",
"CopyBlock(iring, space) bc = bf.BlockChainer() bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time',",
"bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar') bc.blocks.transpose(['time', 'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system')",
"bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096) bc.custom(copy_block)(space='cuda')# $\\tikzmark{gpu-start}$ bc.views.split_axis('time', 256, label='fine_time') bc.blocks.fft(axes='fine_time', axis_labels='freq') bc.blocks.detect(mode='scalar')",
"input ring to output ring\"\"\" def __init__(self, iring, space): super(CopyBlock,",
"ring to output ring\"\"\" def __init__(self, iring, space): super(CopyBlock, self).__init__(iring)",
"to output ring\"\"\" def __init__(self, iring, space): super(CopyBlock, self).__init__(iring) self.orings",
"'pol', 'freq'])#$\\tikzmark{gpu-end}$ bc.blocks.copy(space='system') bc.blocks.quantize('i8') bc.blocks.write_sigproc() pipeline = bf.get_default_pipeline()# $\\tikzmark{pipeline-start}$ pipeline.shutdown_on_signals()"
] |
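This block is the Bifrost pipeline example: a custom CopyBlock transform plus a BlockChainer graph that reads a WAV file, runs an FFT and detection on the GPU, and writes SIGPROC output. The $\tikzmark{...}$ comments are LaTeX listing markers from wherever the snippet was typeset and are kept verbatim; a leading <gh_stars>0 token is corpus metadata, not code, and is omitted. Reconstructed reading order:

from copy import deepcopy

import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.ndarray import copy_array


class CopyBlock(TransformBlock):# $\tikzmark{block-start}$
    """Copy the input ring to output ring"""
    def __init__(self, iring, space):
        super(CopyBlock, self).__init__(iring)
        self.orings = [self.create_ring(space=space)]

    def on_sequence(self, iseq):
        return deepcopy(iseq.header)

    def on_data(self, ispan, ospan):
        copy_array(ospan.data, ispan.data)#$\tikzmark{block-end}$


def copy_block(iring, space):
    return CopyBlock(iring, space)


bc = bf.BlockChainer()
bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)
bc.custom(copy_block)(space='cuda')# $\tikzmark{gpu-start}$
bc.views.split_axis('time', 256, label='fine_time')
bc.blocks.fft(axes='fine_time', axis_labels='freq')
bc.blocks.detect(mode='scalar')
bc.blocks.transpose(['time', 'pol', 'freq'])#$\tikzmark{gpu-end}$
bc.blocks.copy(space='system')
bc.blocks.quantize('i8')
bc.blocks.write_sigproc()

pipeline = bf.get_default_pipeline()# $\tikzmark{pipeline-start}$
pipeline.shutdown_on_signals()
pipeline.run()#$\tikzmark{pipeline-end}$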
[
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"help='database name where metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP",
"# # Licensed under the Apache License, Version 2.0 (the",
"compliance with the License. # You may obtain a copy",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"2.0 (the \"License\"); # you may not use this file",
"agreed to in writing, software # distributed under the License",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Unless required by applicable law or agreed to in writing,",
"default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address or hostname ' 'to InfluxDB",
"distributed under the License is distributed on an \"AS IS\"",
"or # implied. # See the License for the specific",
"(C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP # Copyright",
"the specific language governing permissions and # limitations under the",
"Packard Enterprise Development LP # Copyright 2017 FUJITSU LIMITED #",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"user ', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb',",
"applicable law or agreed to in writing, software # distributed",
"influxdb', default=8086), cfg.StrOpt('user', help='influxdb user ', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb",
"except in compliance with the License. # You may obtain",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"'to InfluxDB instance'), cfg.PortOpt('port', help='port to influxdb', default=8086), cfg.StrOpt('user', help='influxdb",
"cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf):",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"governing permissions and # limitations under the License. from oslo_config",
"not use this file except in compliance with the License.",
"or hostname ' 'to InfluxDB instance'), cfg.PortOpt('port', help='port to influxdb',",
"express or # implied. # See the License for the",
"Hewlett Packard Enterprise Development LP # Copyright 2017 FUJITSU LIMITED",
"import cfg influxdb_opts = [ cfg.StrOpt('database_name', help='database name where metrics",
"influxdb_opts = [ cfg.StrOpt('database_name', help='database name where metrics are stored',",
"address or hostname ' 'to InfluxDB instance'), cfg.PortOpt('port', help='port to",
"writing, software # distributed under the License is distributed on",
"help='Valid IP address or hostname ' 'to InfluxDB instance'), cfg.PortOpt('port',",
"in writing, software # distributed under the License is distributed",
"you may not use this file except in compliance with",
"ANY KIND, either express or # implied. # See the",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"language governing permissions and # limitations under the License. from",
"def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group) def list_opts(): return influxdb_group, influxdb_opts",
"[ cfg.StrOpt('database_name', help='database name where metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address',",
"permissions and # limitations under the License. from oslo_config import",
"use this file except in compliance with the License. #",
"Enterprise Development LP # Copyright 2017 FUJITSU LIMITED # #",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address or hostname ' 'to",
"help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts,",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"# Copyright 2017 FUJITSU LIMITED # # Licensed under the",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"OF ANY KIND, either express or # implied. # See",
"License. # You may obtain a copy of the License",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"License, Version 2.0 (the \"License\"); # you may not use",
"= cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group) def list_opts():",
"# You may obtain a copy of the License at",
"Copyright 2016-2017 Hewlett Packard Enterprise Development LP # Copyright 2017",
"specific language governing permissions and # limitations under the License.",
"and # limitations under the License. from oslo_config import cfg",
"cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group) def list_opts(): return",
"under the License is distributed on an \"AS IS\" BASIS,",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"License for the specific language governing permissions and # limitations",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"either express or # implied. # See the License for",
"FUJITSU LIMITED # # Licensed under the Apache License, Version",
"Copyright 2017 FUJITSU LIMITED # # Licensed under the Apache",
"the License for the specific language governing permissions and #",
"oslo_config import cfg influxdb_opts = [ cfg.StrOpt('database_name', help='database name where",
"(the \"License\"); # you may not use this file except",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"2016-2017 Hewlett Packard Enterprise Development LP # Copyright 2017 FUJITSU",
"# you may not use this file except in compliance",
"LIMITED # # Licensed under the Apache License, Version 2.0",
"2017 FUJITSU LIMITED # # Licensed under the Apache License,",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"cfg.StrOpt('database_name', help='database name where metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid",
"the License is distributed on an \"AS IS\" BASIS, #",
"the License. from oslo_config import cfg influxdb_opts = [ cfg.StrOpt('database_name',",
"cfg.PortOpt('port', help='port to influxdb', default=8086), cfg.StrOpt('user', help='influxdb user ', default='mon_persister'),",
"default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def",
"in compliance with the License. # You may obtain a",
"software # distributed under the License is distributed on an",
"', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb')",
"from oslo_config import cfg influxdb_opts = [ cfg.StrOpt('database_name', help='database name",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or #",
"# # Unless required by applicable law or agreed to",
"# implied. # See the License for the specific language",
"# limitations under the License. from oslo_config import cfg influxdb_opts",
"License. from oslo_config import cfg influxdb_opts = [ cfg.StrOpt('database_name', help='database",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"default=8086), cfg.StrOpt('user', help='influxdb user ', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')]",
"Version 2.0 (the \"License\"); # you may not use this",
"law or agreed to in writing, software # distributed under",
"# (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP #",
"under the License. from oslo_config import cfg influxdb_opts = [",
"InfluxDB instance'), cfg.PortOpt('port', help='port to influxdb', default=8086), cfg.StrOpt('user', help='influxdb user",
"KIND, either express or # implied. # See the License",
"metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address or hostname",
"implied. # See the License for the specific language governing",
"cfg.HostAddressOpt('ip_address', help='Valid IP address or hostname ' 'to InfluxDB instance'),",
"Development LP # Copyright 2017 FUJITSU LIMITED # # Licensed",
"LP # Copyright 2017 FUJITSU LIMITED # # Licensed under",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"name where metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address",
"\"License\"); # you may not use this file except in",
"= [ cfg.StrOpt('database_name', help='database name where metrics are stored', default='mon'),",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group) def",
"secret=True, help='influxdb password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group)",
"CONDITIONS OF ANY KIND, either express or # implied. #",
"to influxdb', default=8086), cfg.StrOpt('user', help='influxdb user ', default='mon_persister'), cfg.StrOpt('password', secret=True,",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"hostname ' 'to InfluxDB instance'), cfg.PortOpt('port', help='port to influxdb', default=8086),",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"See the License for the specific language governing permissions and",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"IP address or hostname ' 'to InfluxDB instance'), cfg.PortOpt('port', help='port",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"to in writing, software # distributed under the License is",
"help='port to influxdb', default=8086), cfg.StrOpt('user', help='influxdb user ', default='mon_persister'), cfg.StrOpt('password',",
"are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address or hostname '",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"# See the License for the specific language governing permissions",
"OR CONDITIONS OF ANY KIND, either express or # implied.",
"You may obtain a copy of the License at #",
"instance'), cfg.PortOpt('port', help='port to influxdb', default=8086), cfg.StrOpt('user', help='influxdb user ',",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"required by applicable law or agreed to in writing, software",
"' 'to InfluxDB instance'), cfg.PortOpt('port', help='port to influxdb', default=8086), cfg.StrOpt('user',",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"cfg.StrOpt('user', help='influxdb user ', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group",
"with the License. # You may obtain a copy of",
"this file except in compliance with the License. # You",
"where metrics are stored', default='mon'), cfg.HostAddressOpt('ip_address', help='Valid IP address or",
"password')] influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group)",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"limitations under the License. from oslo_config import cfg influxdb_opts =",
"help='influxdb user ', default='mon_persister'), cfg.StrOpt('password', secret=True, help='influxdb password')] influxdb_group =",
"title='influxdb') def register_opts(conf): conf.register_group(influxdb_group) conf.register_opts(influxdb_opts, influxdb_group) def list_opts(): return influxdb_group,",
"cfg influxdb_opts = [ cfg.StrOpt('database_name', help='database name where metrics are"
] |
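This block is an oslo.config options module for an InfluxDB-backed metrics persister, Apache 2.0 header included. The reconstruction below only re-wraps the option definitions; note the deliberate implicit string concatenation in the ip_address help text, which the fragments show split as 'Valid IP address or hostname ' 'to InfluxDB instance':

# (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg

influxdb_opts = [
    cfg.StrOpt('database_name',
               help='database name where metrics are stored',
               default='mon'),
    cfg.HostAddressOpt('ip_address',
                       help='Valid IP address or hostname '
                            'to InfluxDB instance'),
    cfg.PortOpt('port',
                help='port to influxdb',
                default=8086),
    cfg.StrOpt('user',
               help='influxdb user ',
               default='mon_persister'),
    cfg.StrOpt('password',
               secret=True,
               help='influxdb password')]

influxdb_group = cfg.OptGroup(name='influxdb', title='influxdb')


def register_opts(conf):
    conf.register_group(influxdb_group)
    conf.register_opts(influxdb_opts, influxdb_group)


def list_opts():
    return influxdb_group, influxdb_opts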
[
"= '[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]' parsed = json.loads(your_json) print(type(your_json))",
"'[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]' parsed = json.loads(your_json) print(type(your_json)) print(type(parsed))",
"null, 1.0, 2]}]' parsed = json.loads(your_json) print(type(your_json)) print(type(parsed)) #print(json.dumps(parsed, indent=4,",
"{\"bar\":[\"baz\", null, 1.0, 2]}]' parsed = json.loads(your_json) print(type(your_json)) print(type(parsed)) #print(json.dumps(parsed,",
"import json your_json = '[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]' parsed",
"json your_json = '[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]' parsed =",
"your_json = '[\"foo\", {\"bar\":[\"baz\", null, 1.0, 2]}]' parsed = json.loads(your_json)",
"1.0, 2]}]' parsed = json.loads(your_json) print(type(your_json)) print(type(parsed)) #print(json.dumps(parsed, indent=4, sort_keys=True))"
] |
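The final complete block is a minimal json.loads demonstration; reassembled, it is simply:

import json

your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)
print(type(your_json))  # <class 'str'>
print(type(parsed))     # <class 'list'>
#print(json.dumps(parsed, indent=4, sort_keys=True))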
[
"data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date,",
"language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if not",
"2.0 (the \"License\"); # you may not use this file",
"1, service.email) sheet.write(row, 2, service.address) sheet.write(row, 3, service.phone_number) sheet.write(row, 4,",
"0 for service in all_services.results: row += 1 sheet.write(row, 0,",
"from rogerthat.rpc import users from rogerthat.rpc.users import get_current_session from rogerthat.utils",
"service_user = users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services()",
"_check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total = 0 to.results = []",
"HttpForbiddenException() if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict])",
"email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language) if not",
"solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants,",
"as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400) return { 'url': get_serving_url(gcs_path),",
"service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False,",
"for service in all_services.results: row += 1 sheet.write(row, 0, service.name)",
"from solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\",
"= '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-')) content_type = 'application/vnd.ms-excel'",
"get_current_session().shop: lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email())",
"in cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put',",
"customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants] customers_dict = {customer.id:",
"= list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails = [] for merchant",
"for merchant in osa_merchants] customers_dict = {customer.id: customer for customer",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"solutions import translate from solutions.common.bizz import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor",
"zip(models, osa_merchants): customer = customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete() continue",
"permissions and # limitations under the License. # # @@license_version:1.7@@",
"from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal",
"datetime from google.appengine.ext import ndb, deferred, db from typing import",
"other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city = key.get()",
"date.replace(' ', '-')) content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type)",
"city = key.get() if not city: city = CirkloCity(key=key, service_user_email=service_user.email())",
"in cirklo_emails: logging.error('Duplicate found %s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] =",
"= None city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr: city.signup_names",
"@rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if",
"= city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0, 1, translate(language, 'Email'))",
"users from rogerthat.rpc.users import get_current_session from rogerthat.utils import parse_date from",
"None to.more = False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city:",
"ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) # type: Customer if data.accepted:",
"SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET",
"True else: merchant.denied = True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()",
"city.app_info = info sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return",
"import get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities",
"= XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row = 0 for service",
"from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models",
"Customer from solutions import translate from solutions.common.bizz import SolutionModule, broadcast_updates_pending",
"CirkloVoucherListTO() to.total = 0 to.results = [] to.cursor = None",
"merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False service_to",
"from rogerthat.models import ServiceIdentity from rogerthat.models.settings import ServiceInfo from rogerthat.rpc",
"use this file except in compliance with the License. #",
"CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict",
"# type: CirkloCity if not data.city_id: if other_city: other_city.key.delete() return",
"for merchant in osa_merchants] models = ndb.get_multi(info_keys) for service_info, merchant",
"continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save = False if cirklo_merchant: if",
"qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants = [] for",
"api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if not get_current_session().shop: lang = get_solution_settings(service_user).main_language",
"translate(language, 'created')) sheet.write(0, 5, translate(language, 'merchant_registered')) date_format = XFStyle() date_format.num_format_str",
"HttpNotFoundException from mcfw.restapi import rest from mcfw.rpc import returns, arguments",
"CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup') to.populate_from_info(service_info, customer) return to else:",
"return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user =",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"import returns, arguments from rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service import",
"if data.accepted else None if not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted",
"is_cirklo_only_merchant = '@' not in data.id if is_cirklo_only_merchant: merchant =",
"from mcfw.restapi import rest from mcfw.rpc import returns, arguments from",
"License. # You may obtain a copy of the License",
"= info sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city)",
"= True else: merchant.denied = True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email),",
"CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if",
"from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services",
"elif city.service_user_email != service_user.email(): raise HttpBadRequestException('City id %s is already",
"the License. # # @@license_version:1.7@@ import cloudstorage import logging from",
"under the License is distributed on an \"AS IS\" BASIS,",
"translate(language, 'No')) date = format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls'",
"License for the specific language governing permissions and # limitations",
"= users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total",
"import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import",
"@rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings",
"'/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-')) content_type = 'application/vnd.ms-excel' with",
"and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names =",
"return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails =",
"= True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict)",
"row += 1 sheet.write(row, 0, service.name) sheet.write(row, 1, service.email) sheet.write(row,",
"email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City settings aren\\'t",
"already in use by another service' % data.city_id) if other_city",
"get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant,",
"sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered",
"_check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules)",
"settings aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email],",
"is_cirklo_only_merchant: if data.accepted: merchant.whitelisted = True else: merchant.denied = True",
"= format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace('",
"type: List[CirkloMerchant] osa_merchants = [] for merchant in qry: if",
"limitations under the License. # # @@license_version:1.7@@ import cloudstorage import",
"city = CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email != service_user.email(): raise HttpBadRequestException('City",
"city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0, 1, translate(language, 'Email')) sheet.write(0,",
"from mcfw.rpc import returns, arguments from rogerthat.bizz.gcs import get_serving_url from",
"= [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in osa_merchants] models = ndb.get_multi(info_keys)",
"send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings from",
"import _check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise",
"merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants = []",
"email_id: raise HttpBadRequestException('City settings aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check, email_id,",
"in data.id if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant",
"broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET from",
"CirkloCity if not cirklo_city: raise HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant",
"type: CirkloMerchant language = merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get() language",
"invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url =",
"in compliance with the License. # You may obtain a",
"-*- coding: utf-8 -*- # Copyright 2020 Green Valley Belgium",
"users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if all_services.cursor:",
"city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def api_export_cirklo_services(): service_user",
"HttpBadRequestException('City id %s is already in use by another service'",
"software # distributed under the License is distributed on an",
"= CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data):",
"= get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError()",
"@returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if not get_current_session().shop:",
"if not customer.service_user: merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save =",
"import invalidate_cache from mcfw.consts import REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException,",
"# Copyright 2020 Green Valley Belgium NV # # Licensed",
"'Phone number')) sheet.write(0, 4, translate(language, 'created')) sheet.write(0, 5, translate(language, 'merchant_registered'))",
"to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup') to.populate_from_info(service_info, customer) return",
"service' % data.city_id) if other_city and other_city.key != key: other_city.key.delete()",
"else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup')) if osa_merchants:",
"format_datetime from datetime import datetime from google.appengine.ext import ndb, deferred,",
"= [] for merchant in cirklo_merchants: if merchant['email'] in cirklo_emails:",
"merchant = CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id =",
"to.results = [] to.cursor = None to.more = False cirklo_city",
"import datetime from google.appengine.ext import ndb, deferred, db from typing",
"Workbook, XFStyle from mcfw.cache import invalidate_cache from mcfw.consts import REST_TYPE_TO",
"'reservation-name')) sheet.write(0, 1, translate(language, 'Email')) sheet.write(0, 2, translate(language, 'address')) sheet.write(0,",
"return to else: if data.accepted: merchant.whitelisted = True else: merchant.denied",
"api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo',",
"= cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City settings aren\\'t fully",
"'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings",
"!= 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False):",
"'address')) sheet.write(0, 3, translate(language, 'Phone number')) sheet.write(0, 4, translate(language, 'created'))",
"date = format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET,",
"settings found.') is_cirklo_only_merchant = '@' not in data.id if is_cirklo_only_merchant:",
"cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO)",
"CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO,",
"not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted = True else: merchant.denied =",
"merchant.put() elif merchant.whitelisted: merchant.whitelisted = False merchant.put() whitelist_date = cirklo_merchant['createdAt']",
"cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return to cirklo_merchants =",
"'Yes') if service.merchant_registered else translate(language, 'No')) date = format_datetime(datetime.now(), format='medium',",
"'Z' if data.accepted else None if not is_cirklo_only_merchant: if data.accepted:",
"import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity,",
"get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError() book = Workbook(encoding='utf-8') sheet =",
"number')) sheet.write(0, 4, translate(language, 'created')) sheet.write(0, 5, translate(language, 'merchant_registered')) date_format",
"elif merchant.whitelisted: merchant.whitelisted = False should_save = True if should_save:",
"import send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings",
"sheet.write(row, 2, service.address) sheet.write(row, 3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format)",
"u'Cirklo signup')) if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in",
"# -*- coding: utf-8 -*- # Copyright 2020 Green Valley",
"@rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services',",
"True elif merchant.whitelisted: merchant.whitelisted = False should_save = True if",
"whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city =",
"if cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup'))",
"= cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if",
"ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) # type: Customer if data.accepted: service_identity_user",
"solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import",
"other_city and other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url =",
"[] to.cursor = None to.more = False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"if merchant['email'] in cirklo_emails: logging.error('Duplicate found %s', merchant['email']) continue cirklo_emails.append(merchant['email'])",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"= False should_save = True if should_save: merchant.put() service_identity_user =",
"fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names",
"merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email in cirklo_emails:",
"cirklo_merchant if cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo",
"'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city =",
"merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) # type:",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"and other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url",
"found %s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry =",
"= city.app_info and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings",
"import Customer from solutions import translate from solutions.common.bizz import SolutionModule,",
"to in writing, software # distributed under the License is",
"CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user",
"1 sheet.write(row, 0, service.name) sheet.write(row, 1, service.email) sheet.write(row, 2, service.address)",
"'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get',",
"raise NotImplementedError() book = Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') # type:",
"format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ',",
"CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user()",
"# See the License for the specific language governing permissions",
"silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings =",
"'merchant_registered')) date_format = XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row = 0",
"ServiceIdentity from rogerthat.models.settings import ServiceInfo from rogerthat.rpc import users from",
"SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr:",
"_check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging)",
"language governing permissions and # limitations under the License. #",
"'shopInfo' in cirklo_merchant if cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date,",
"from rogerthat.bizz.service import re_index_map_only from rogerthat.consts import FAST_QUEUE from rogerthat.models",
"or agreed to in writing, software # distributed under the",
"import rest from mcfw.rpc import returns, arguments from rogerthat.bizz.gcs import",
"# type: List[CirkloMerchant] osa_merchants = [] for merchant in qry:",
"= data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names =",
"mcfw.cache import invalidate_cache from mcfw.consts import REST_TYPE_TO from mcfw.exceptions import",
"# type: Worksheet language = city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name'))",
"required by applicable law or agreed to in writing, software",
"logging.error('Duplicate found %s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry",
"= merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"else None merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else",
"parse_date from rogerthat.utils.service import create_service_identity_user from shop.models import Customer from",
"with the License. # You may obtain a copy of",
"type: Worksheet language = city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0,",
"!= og_info and not sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending =",
"XFStyle from mcfw.cache import invalidate_cache from mcfw.consts import REST_TYPE_TO from",
"rest from mcfw.rpc import returns, arguments from rogerthat.bizz.gcs import get_serving_url",
"translate(language, 'Phone number')) sheet.write(0, 4, translate(language, 'created')) sheet.write(0, 5, translate(language,",
"to = CirkloVoucherListTO() to.total = 0 to.results = [] to.cursor",
"from rogerthat.utils.service import create_service_identity_user from shop.models import Customer from solutions",
"def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def",
"data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language)",
"(OCA_FILES_BUCKET, date.replace(' ', '-')) content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w',",
"parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language,",
"service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to)",
"# # @@license_version:1.7@@ import cloudstorage import logging from babel.dates import",
"get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to =",
"service.merchant_registered else translate(language, 'No')) date = format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path",
"in city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities',",
"cirklo_city: raise HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant = '@' not",
"SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled,",
"from rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service import re_index_map_only from rogerthat.consts",
"= CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup') to.populate_from_info(service_info, customer) return to",
"if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language =",
"%s is already in use by another service' % data.city_id)",
"compliance with the License. # You may obtain a copy",
"CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings):",
"= SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info",
"agreed to in writing, software # distributed under the License",
"Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') # type: Worksheet language = city_sln_settings.main_language",
"else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails:",
"'Email')) sheet.write(0, 2, translate(language, 'address')) sheet.write(0, 3, translate(language, 'Phone number'))",
"SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO",
"data.accepted else None if not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted =",
"CirkloCity if not data.city_id: if other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key",
"return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city = key.get() if not",
"distributed under the License is distributed on an \"AS IS\"",
"cirklo_merchants: if merchant['email'] in cirklo_emails: logging.error('Duplicate found %s', merchant['email']) continue",
"= cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered = 'shopInfo' in",
"sln_settings = get_solution_settings(service_user) if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():",
"License. # # @@license_version:1.7@@ import cloudstorage import logging from babel.dates",
"u'OSA signup') to.populate_from_info(service_info, customer) return to else: if data.accepted: merchant.whitelisted",
"1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return",
"db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in osa_merchants] models",
"HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if not",
"= datetime.now().isoformat() + 'Z' if data.accepted else None if not",
"raise HttpBadRequestException('City settings aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email],",
"SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules) != 1:",
"cirklo_merchant if cirklo_merchant else False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered,",
"import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city",
"merchant.whitelisted: merchant.whitelisted = False should_save = True if should_save: merchant.put()",
"@arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings)",
"express or implied. # See the License for the specific",
"from google.appengine.ext import ndb, deferred, db from typing import List",
"except in compliance with the License. # You may obtain",
"service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant",
"= users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO)",
"get_serving_url from rogerthat.bizz.service import re_index_map_only from rogerthat.consts import FAST_QUEUE from",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"= Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') # type: Worksheet language =",
"not use this file except in compliance with the License.",
"service.name) sheet.write(row, 1, service.email) sheet.write(row, 2, service.address) sheet.write(row, 3, service.phone_number)",
"cirklo_emails = [] for merchant in cirklo_merchants: if merchant['email'] in",
"list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails,",
"_check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError() book =",
"writing, software # distributed under the License is distributed on",
"city: city = CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email != service_user.email(): raise",
"CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city def",
"= Customer.get_by_id(merchant.customer_id) # type: Customer if data.accepted: service_identity_user = create_service_identity_user(customer.service_user)",
"book = Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') # type: Worksheet language",
"you may not use this file except in compliance with",
"merchant.whitelisted = True else: merchant.denied = True merchant.put() return CirkloVoucherServiceTO.from_model(merchant,",
"0, translate(language, 'reservation-name')) sheet.write(0, 1, translate(language, 'Email')) sheet.write(0, 2, translate(language,",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"to.total = 0 to.results = [] to.cursor = None to.more",
"merchant.whitelisted = True should_save = True elif merchant.whitelisted: merchant.whitelisted =",
"in cirklo_merchant if cirklo_merchant else False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date,",
"def whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city",
"CirkloCity.create_key(data.city_id) city = key.get() if not city: city = CirkloCity(key=key,",
"import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi import rest from mcfw.rpc",
"!= service_user.email(): raise HttpBadRequestException('City id %s is already in use",
"from typing import List from xlwt import Worksheet, Workbook, XFStyle",
"language = city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0, 1, translate(language,",
"@arguments() def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return",
"re_index_map_only from rogerthat.consts import FAST_QUEUE from rogerthat.models import ServiceIdentity from",
"create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')",
"HttpBadRequestException('City settings aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id,",
"merchant.denied = True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer =",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"date_format.num_format_str = 'dd/mm/yyyy' row = 0 for service in all_services.results:",
"not email_id: raise HttpBadRequestException('City settings aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id,",
"= users.get_current_user() if not get_current_session().shop: lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang,",
"translate(language, 'merchant_registered')) date_format = XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row =",
"sheet.write(row, 1, service.email) sheet.write(row, 2, service.address) sheet.write(row, 3, service.phone_number) sheet.write(row,",
"cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup')) if",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"service in all_services.results: row += 1 sheet.write(row, 0, service.name) sheet.write(row,",
"<filename>src/solutions/common/integrations/cirklo/api.py # -*- coding: utf-8 -*- # Copyright 2020 Green",
"sheet.write(row, 0, service.name) sheet.write(row, 1, service.email) sheet.write(row, 2, service.address) sheet.write(row,",
"SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO,",
"not customer.service_user: merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save = False",
"customer = customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete() continue cirklo_merchant =",
"if cirklo_merchant else False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA",
"= merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants =",
"if other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city =",
"= CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email != service_user.email(): raise HttpBadRequestException('City id",
"5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No')) date =",
"whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered = 'shopInfo'",
"get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if not",
"email_id: raise HttpBadRequestException('City settings aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email)",
"Green Valley Belgium NV # # Licensed under the Apache",
"gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-')) content_type =",
"logging from babel.dates import format_datetime from datetime import datetime from",
"cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted = True merchant.put() elif merchant.whitelisted:",
"[Customer.create_key(merchant.customer_id) for merchant in osa_merchants] customers_dict = {customer.id: customer for",
"= data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names = None city.signup_mail =",
"cirklo_merchant = cirklo_dict.get(customer.user_email) should_save = False if cirklo_merchant: if customer.user_email",
"if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted =",
"= True should_save = True elif merchant.whitelisted: merchant.whitelisted = False",
"if all_services.cursor: raise NotImplementedError() book = Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo')",
"= {} cirklo_emails = [] for merchant in cirklo_merchants: if",
"models = ndb.get_multi(info_keys) for service_info, merchant in zip(models, osa_merchants): customer",
"signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email in cirklo_emails: cirklo_merchant =",
"from babel.dates import format_datetime from datetime import datetime from google.appengine.ext",
"shop.models import Customer from solutions import translate from solutions.common.bizz import",
"merchant.whitelisted: merchant.whitelisted = False merchant.put() whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant",
"= book.add_sheet('Cirklo') # type: Worksheet language = city_sln_settings.main_language sheet.write(0, 0,",
"from mcfw.consts import REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException",
"sheet.write(0, 5, translate(language, 'merchant_registered')) date_format = XFStyle() date_format.num_format_str = 'dd/mm/yyyy'",
"import get_current_session from rogerthat.utils import parse_date from rogerthat.utils.service import create_service_identity_user",
"api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services =",
"'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path,",
"= True merchant.put() elif merchant.whitelisted: merchant.whitelisted = False merchant.put() whitelist_date",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise HttpForbiddenException() if",
"get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total = 0 to.results =",
"setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else:",
"merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) #",
"customer = Customer.get_by_id(merchant.customer_id) # type: Customer if data.accepted: service_identity_user =",
"the License is distributed on an \"AS IS\" BASIS, #",
"= create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA",
"service_user.email(): raise HttpBadRequestException('City id %s is already in use by",
"data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,",
"content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400) return { 'url':",
"fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info",
"[] for merchant in qry: if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant",
"= SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr)",
"= CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for",
"city.service_user_email != service_user.email(): raise HttpBadRequestException('City id %s is already in",
"True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments()",
"translate(language, 'reservation-name')) sheet.write(0, 1, translate(language, 'Email')) sheet.write(0, 2, translate(language, 'address'))",
"mcfw.consts import REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from",
"False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer)",
"in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in osa_merchants]",
"sheet.write(row, 3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language,",
"import logging from babel.dates import format_datetime from datetime import datetime",
"all_services.cursor: raise NotImplementedError() book = Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') #",
"_check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise HttpForbiddenException()",
"if not cirklo_city: return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict =",
"is already in use by another service' % data.city_id) if",
"not cirklo_city: return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {}",
"service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered =",
"return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user",
"CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def api_export_cirklo_services(): service_user = users.get_current_user()",
"'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if not",
"= users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if",
"service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language, 'Yes') if",
"True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get')",
"data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info and city.app_info.to_dict()",
"@returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email())",
"service_user_email=service_user.email()) elif city.service_user_email != service_user.email(): raise HttpBadRequestException('City id %s is",
"'-')) content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:",
"other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url city.signup_enabled",
"# type: CirkloMerchant language = merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get()",
"service.address) sheet.write(row, 3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row, 5,",
"key = CirkloCity.create_key(data.city_id) city = key.get() if not city: city",
"= get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) # type:",
"law or agreed to in writing, software # distributed under",
"2020 Green Valley Belgium NV # # Licensed under the",
"def api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services",
"cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails = [] for",
"= True else: merchant.denied = True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date,",
"cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if not cirklo_city: raise",
"import FAST_QUEUE from rogerthat.models import ServiceIdentity from rogerthat.models.settings import ServiceInfo",
"-*- # Copyright 2020 Green Valley Belgium NV # #",
"info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in osa_merchants] models =",
"raise HttpForbiddenException() if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True)",
"if cirklo_merchant: if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted:",
"', '-')) content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type) as",
"2, translate(language, 'address')) sheet.write(0, 3, translate(language, 'Phone number')) sheet.write(0, 4,",
"from mcfw.cache import invalidate_cache from mcfw.consts import REST_TYPE_TO from mcfw.exceptions",
"else: if data.accepted: merchant.whitelisted = True else: merchant.denied = True",
"CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup')) if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id)",
"babel.dates import format_datetime from datetime import datetime from google.appengine.ext import",
"@returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user)",
"+ 'Z' if data.accepted else None if not is_cirklo_only_merchant: if",
"Customer if data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to =",
"merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language = merchant.get_language() else:",
"to.results.append(service_to) for email in cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return",
"if not email_id: raise HttpBadRequestException('City settings aren\\'t fully setup yet.')",
"to.cursor = None to.more = False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if",
"sheet.write(0, 4, translate(language, 'created')) sheet.write(0, 5, translate(language, 'merchant_registered')) date_format =",
"False, u'OSA signup') to.populate_from_info(service_info, customer) return to else: if data.accepted:",
"CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city = key.get() if not city:",
"None city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr: city.signup_names =",
"= {customer.id: customer for customer in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email),",
"else: merchant = CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id",
"may obtain a copy of the License at # #",
"use by another service' % data.city_id) if other_city and other_city.key",
"under the License. # # @@license_version:1.7@@ import cloudstorage import logging",
"email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() + 'Z' if",
"og_info = city.app_info and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons)",
"date_format = XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row = 0 for",
"= 'shopInfo' in cirklo_merchant if cirklo_merchant else False service_to =",
"HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi import rest from mcfw.rpc import",
"= key.get() if not city: city = CirkloCity(key=key, service_user_email=service_user.email()) elif",
"in osa_merchants] models = ndb.get_multi(info_keys) for service_info, merchant in zip(models,",
"email in cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist',",
"CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language = merchant.get_language() else: merchant =",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"data.city_id) if other_city and other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email())",
"if service.merchant_registered else translate(language, 'No')) date = format_datetime(datetime.now(), format='medium', locale='en_GB')",
"rogerthat.utils import parse_date from rogerthat.utils.service import create_service_identity_user from shop.models import",
"data.accepted: merchant.whitelisted = True else: merchant.denied = True merchant.put() service_info",
"merchant in qry: if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])",
"= get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError() book = Workbook(encoding='utf-8') sheet",
"signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user()",
"= 'dd/mm/yyyy' row = 0 for service in all_services.results: row",
"= [] to.cursor = None to.more = False cirklo_city =",
"may not use this file except in compliance with the",
"Worksheet, Workbook, XFStyle from mcfw.cache import invalidate_cache from mcfw.consts import",
"from rogerthat.rpc.users import get_current_session from rogerthat.utils import parse_date from rogerthat.utils.service",
"cirklo_dict = {} cirklo_emails = [] for merchant in cirklo_merchants:",
"found.') is_cirklo_only_merchant = '@' not in data.id if is_cirklo_only_merchant: merchant",
"data.signup_logo_url city.signup_names = None city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl and",
"aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,",
"= None to.more = False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not",
"False if cirklo_merchant: if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if not",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise",
"cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted = True merchant.put() elif",
"this file except in compliance with the License. # You",
"language = merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language",
"merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted:",
"= True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo',",
"type: Customer if data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to",
"0 to.results = [] to.cursor = None to.more = False",
"CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO,",
"merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO)",
"5, translate(language, 'merchant_registered')) date_format = XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row",
"for service_info, merchant in zip(models, osa_merchants): customer = customers_dict[merchant.customer_id] if",
"if data.accepted: merchant.whitelisted = True else: merchant.denied = True merchant.put()",
"import create_service_identity_user from shop.models import Customer from solutions import translate",
"qry: if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant:",
"cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted",
"create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None",
"mcfw.restapi import rest from mcfw.rpc import returns, arguments from rogerthat.bizz.gcs",
"rogerthat.models.settings import ServiceInfo from rogerthat.rpc import users from rogerthat.rpc.users import",
"merchant.denied = True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')",
"get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity",
"by another service' % data.city_id) if other_city and other_city.key !=",
"import format_datetime from datetime import datetime from google.appengine.ext import ndb,",
"if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if",
"elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info and",
"not sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings)",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"List[CirkloMerchant] osa_merchants = [] for merchant in qry: if merchant.service_user_email:",
"merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email']",
"in qry: if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if",
"customers_dict = {customer.id: customer for customer in db.get(customer_to_get)} info_keys =",
"customer) to.results.append(service_to) for email in cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))",
"XFStyle() date_format.num_format_str = 'dd/mm/yyyy' row = 0 for service in",
"# # Licensed under the Apache License, Version 2.0 (the",
"service_user = users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put')",
"from rogerthat.models.settings import ServiceInfo from rogerthat.rpc import users from rogerthat.rpc.users",
"solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not",
"@returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user)",
"False should_save = True if should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user)",
"signup')) if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]",
"= get_solution_settings(service_user) if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only(): city.app_info",
"silent_result=True) @returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True)",
"return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def api_export_cirklo_services(): service_user =",
"row = 0 for service in all_services.results: row += 1",
"FAST_QUEUE from rogerthat.models import ServiceIdentity from rogerthat.models.settings import ServiceInfo from",
"get_current_session from rogerthat.utils import parse_date from rogerthat.utils.service import create_service_identity_user from",
"cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered = 'shopInfo' in cirklo_merchant",
"city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get',",
"broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def api_export_cirklo_services():",
"translate(language, 'Email')) sheet.write(0, 2, translate(language, 'address')) sheet.write(0, 3, translate(language, 'Phone",
"= 0 for service in all_services.results: row += 1 sheet.write(row,",
"= SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif",
"solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to",
"sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings) city.put()",
"raise HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant = '@' not in",
"sheet.write(0, 3, translate(language, 'Phone number')) sheet.write(0, 4, translate(language, 'created')) sheet.write(0,",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"from solutions.common.bizz import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from",
"is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language = merchant.get_language()",
"whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None if",
"SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info =",
"Valley Belgium NV # # Licensed under the Apache License,",
"from datetime import datetime from google.appengine.ext import ndb, deferred, db",
"cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City settings aren\\'t fully setup",
"import ServiceInfo from rogerthat.rpc import users from rogerthat.rpc.users import get_current_session",
"cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400) return",
"@rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city",
"if cirklo_merchant else None merchant_registered = 'shopInfo' in cirklo_merchant if",
"sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post')",
"[data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted",
"if data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant,",
"in use by another service' % data.city_id) if other_city and",
"= CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if not data.city_id: if other_city:",
"whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty,",
"arguments from rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service import re_index_map_only from",
"api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services():",
"= [] for merchant in qry: if merchant.service_user_email: osa_merchants.append(merchant) else:",
"service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) # type: Customer",
"# limitations under the License. # # @@license_version:1.7@@ import cloudstorage",
"gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400) return { 'url': get_serving_url(gcs_path), }",
"city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr)",
"import cloudstorage import logging from babel.dates import format_datetime from datetime",
"utf-8 -*- # Copyright 2020 Green Valley Belgium NV #",
"@@license_version:1.7@@ import cloudstorage import logging from babel.dates import format_datetime from",
"Worksheet language = city_sln_settings.main_language sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0, 1,",
"google.appengine.ext import ndb, deferred, db from typing import List from",
"title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if info.to_dict() != og_info and",
"to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup')) if osa_merchants: customer_to_get =",
"+= 1 sheet.write(row, 0, service.name) sheet.write(row, 1, service.email) sheet.write(row, 2,",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email in cirklo_emails: cirklo_merchant = cirklo_dict[email]",
"merchant.whitelisted: merchant.whitelisted = True merchant.put() elif merchant.whitelisted: merchant.whitelisted = False",
"@returns(dict) @arguments() def api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings = get_solution_settings(service_user)",
"ServiceIdentity.DEFAULT) for merchant in osa_merchants] models = ndb.get_multi(info_keys) for service_info,",
"else: merchant.denied = True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo",
"list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo",
"whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email in",
"other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if not data.city_id: if",
"elif merchant.whitelisted: merchant.whitelisted = False merchant.put() whitelist_date = cirklo_merchant['createdAt'] if",
"4, parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else",
"cirklo_emails: logging.error('Duplicate found %s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant",
"% (OCA_FILES_BUCKET, date.replace(' ', '-')) content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path,",
"if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants] customers_dict",
"merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted = True",
"or implied. # See the License for the specific language",
"= ndb.get_multi(info_keys) for service_info, merchant in zip(models, osa_merchants): customer =",
"rogerthat.utils.service import create_service_identity_user from shop.models import Customer from solutions import",
"\\ SignupMails, CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\",
"merchant in zip(models, osa_merchants): customer = customers_dict[merchant.customer_id] if not customer.service_user:",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City",
"elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl) elif data.signup_name_fr: city.signup_names =",
"mcfw.rpc import returns, arguments from rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service",
"another service' % data.city_id) if other_city and other_city.key != key:",
"to else: if data.accepted: merchant.whitelisted = True else: merchant.denied =",
"= get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if",
"sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No')) date",
"= CirkloVoucherListTO() to.total = 0 to.results = [] to.cursor =",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"import parse_date from rogerthat.utils.service import create_service_identity_user from shop.models import Customer",
"= CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if not cirklo_city: raise HttpNotFoundException('No",
"def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if not get_current_session().shop: lang =",
"len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool) def",
"merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save = False if cirklo_merchant:",
"city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,",
"CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if info.to_dict() != og_info",
"city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_nl)",
"import get_serving_url from rogerthat.bizz.service import re_index_map_only from rogerthat.consts import FAST_QUEUE",
"to.populate_from_info(service_info, customer) return to else: if data.accepted: merchant.whitelisted = True",
"to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails = []",
"data.id if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language",
"and # limitations under the License. # # @@license_version:1.7@@ import",
"buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if info.to_dict() != og_info and not",
"True if should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date",
"# type: CirkloCity if not cirklo_city: raise HttpNotFoundException('No cirklo settings",
"in zip(models, osa_merchants): customer = customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete()",
"= CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if info.to_dict() !=",
"(the \"License\"); # you may not use this file except",
"@rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user = users.get_current_user()",
"should_save = True elif merchant.whitelisted: merchant.whitelisted = False should_save =",
"if not get_current_session().shop: lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city",
"OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant,",
"= SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info and city.app_info.to_dict() info =",
"# you may not use this file except in compliance",
"format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))",
"ServiceInfo from rogerthat.rpc import users from rogerthat.rpc.users import get_current_session from",
"from rogerthat.utils import parse_date from rogerthat.utils.service import create_service_identity_user from shop.models",
"for merchant in cirklo_merchants: if merchant['email'] in cirklo_emails: logging.error('Duplicate found",
"= get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id:",
"@returns([dict]) @arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO)",
"id %s is already in use by another service' %",
"def get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to",
"merchant['email'] in cirklo_emails: logging.error('Duplicate found %s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']]",
"sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export', 'post') @returns(dict) @arguments() def",
"if not cirklo_city: raise HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant =",
"info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending",
"merchant.whitelisted: merchant.whitelisted = True should_save = True elif merchant.whitelisted: merchant.whitelisted",
"translate(language, 'address')) sheet.write(0, 3, translate(language, 'Phone number')) sheet.write(0, 4, translate(language,",
"rogerthat.models import ServiceIdentity from rogerthat.models.settings import ServiceInfo from rogerthat.rpc import",
"datetime.now().isoformat() + 'Z' if data.accepted else None if not is_cirklo_only_merchant:",
"users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO)",
"with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)",
"raise HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if",
"CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if not data.city_id: if other_city: other_city.key.delete()",
"data.signup_name_nl and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names",
"= customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email)",
"# # Unless required by applicable law or agreed to",
"xlwt import Worksheet, Workbook, XFStyle from mcfw.cache import invalidate_cache from",
"in cirklo_merchants: if merchant['email'] in cirklo_emails: logging.error('Duplicate found %s', merchant['email'])",
"deferred.defer(re_index_map_only, service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup') to.populate_from_info(service_info,",
"True merchant.put() elif merchant.whitelisted: merchant.whitelisted = False merchant.put() whitelist_date =",
"book.add_sheet('Cirklo') # type: Worksheet language = city_sln_settings.main_language sheet.write(0, 0, translate(language,",
"else: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City settings",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE)",
"rogerthat.bizz.service import re_index_map_only from rogerthat.consts import FAST_QUEUE from rogerthat.models import",
"not merchant.whitelisted: merchant.whitelisted = True merchant.put() elif merchant.whitelisted: merchant.whitelisted =",
"city.signup_names = None city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr:",
"osa_merchants): customer = customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete() continue cirklo_merchant",
"= [Customer.create_key(merchant.customer_id) for merchant in osa_merchants] customers_dict = {customer.id: customer",
"Version 2.0 (the \"License\"); # you may not use this",
"in cirklo_merchant if cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered,",
"_queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City",
"users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type:",
"mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi import rest from",
"osa_merchants] customers_dict = {customer.id: customer for customer in db.get(customer_to_get)} info_keys",
"city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr, fr=data.signup_name_fr) og_info = city.app_info and city.app_info.to_dict() info",
"merchant in osa_merchants] customers_dict = {customer.id: customer for customer in",
"to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user =",
"= CirkloMerchant.create_key(data.id).get() language = get_solution_settings(users.User(merchant.service_user_email)).main_language if data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language)",
"setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat()",
"not data.city_id: if other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id)",
"settings aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE)",
"False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user",
"= CirkloCity.create_key(data.city_id) city = key.get() if not city: city =",
"from shop.models import Customer from solutions import translate from solutions.common.bizz",
"else translate(language, 'No')) date = format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path =",
"%s', merchant['email']) continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id)",
"u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings(): service_user =",
"CirkloMerchant language = merchant.get_language() else: merchant = CirkloMerchant.create_key(data.id).get() language =",
"implied. # See the License for the specific language governing",
"for customer in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant",
"CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email != service_user.email(): raise HttpBadRequestException('City id %s",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) #",
"'dd/mm/yyyy' row = 0 for service in all_services.results: row +=",
"= True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id)",
"rogerthat.rpc import users from rogerthat.rpc.users import get_current_session from rogerthat.utils import",
"customer) return to else: if data.accepted: merchant.whitelisted = True else:",
"data.accepted: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise HttpBadRequestException('City settings",
"_queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None",
"CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings): if",
"if not merchant.whitelisted: merchant.whitelisted = True should_save = True elif",
"merchant.whitelisted = False should_save = True if should_save: merchant.put() service_identity_user",
"by applicable law or agreed to in writing, software #",
"= CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants = [] for merchant",
"service.email) sheet.write(row, 2, service.address) sheet.write(row, 3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date),",
"2, service.address) sheet.write(row, 3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row,",
"REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi import",
"cirklo_merchant else None merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant",
"= 'shopInfo' in cirklo_merchant if cirklo_merchant else False to.results.append( CirkloVoucherServiceTO.from_model(merchant,",
"sheet.write(0, 0, translate(language, 'reservation-name')) sheet.write(0, 1, translate(language, 'Email')) sheet.write(0, 2,",
"= create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else",
"osa_merchants] models = ndb.get_multi(info_keys) for service_info, merchant in zip(models, osa_merchants):",
"0, service.name) sheet.write(row, 1, service.email) sheet.write(row, 2, service.address) sheet.write(row, 3,",
"coding: utf-8 -*- # Copyright 2020 Green Valley Belgium NV",
"type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user = users.get_current_user() city_sln_settings =",
"return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user",
"CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants = [] for merchant in",
"solutions.common.bizz import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts",
"for merchant in qry: if merchant.service_user_email: osa_merchants.append(merchant) else: cirklo_merchant =",
"info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if info.to_dict()",
"NV # # Licensed under the Apache License, Version 2.0",
"rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service import re_index_map_only from rogerthat.consts import",
"get_solution_settings(service_user) if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only(): city.app_info =",
"raise HttpBadRequestException('City id %s is already in use by another",
"cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]",
"merchant.whitelisted = True merchant.put() elif merchant.whitelisted: merchant.whitelisted = False merchant.put()",
"if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules) !=",
"should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt']",
"HttpForbiddenException, HttpNotFoundException from mcfw.restapi import rest from mcfw.rpc import returns,",
"ndb, deferred, db from typing import List from xlwt import",
"from rogerthat.consts import FAST_QUEUE from rogerthat.models import ServiceIdentity from rogerthat.models.settings",
"merchant_registered, u'Cirklo signup')) if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for merchant",
"= False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return to",
"in cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted = True should_save",
"city.signup_enabled = data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names = None city.signup_mail",
"in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted: merchant.whitelisted = True merchant.put()",
"returns, arguments from rogerthat.bizz.gcs import get_serving_url from rogerthat.bizz.service import re_index_map_only",
"db from typing import List from xlwt import Worksheet, Workbook,",
"other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url",
"get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError() book",
"cloudstorage import logging from babel.dates import format_datetime from datetime import",
"whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def api_vouchers_get_cirklo_settings():",
"False to.results.append( CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup')) if osa_merchants: customer_to_get",
"import Worksheet, Workbook, XFStyle from mcfw.cache import invalidate_cache from mcfw.consts",
"and not sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending = True sln_settings.put()",
"'@' not in data.id if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() #",
"# @@license_version:1.7@@ import cloudstorage import logging from babel.dates import format_datetime",
"None if not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted = True else:",
"to.more = False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return",
"merchant.whitelisted = False merchant.put() whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else",
"service_user = users.get_current_user() if not get_current_session().shop: lang = get_solution_settings(service_user).main_language raise",
"= users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) #",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"merchant in cirklo_merchants: if merchant['email'] in cirklo_emails: logging.error('Duplicate found %s',",
"Unless required by applicable law or agreed to in writing,",
"type: CirkloCity if not data.city_id: if other_city: other_city.key.delete() return CirkloCityTO.from_model(None)",
"@arguments(data=CirkloCityTO) def api_vouchers_save_cirklo_settings(data): service_user = users.get_current_user() if not get_current_session().shop: lang",
"if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted =",
"city.app_info and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings =",
"import re_index_map_only from rogerthat.consts import FAST_QUEUE from rogerthat.models import ServiceIdentity",
"deferred, db from typing import List from xlwt import Worksheet,",
"osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants] customers_dict =",
"'No')) date = format_datetime(datetime.now(), format='medium', locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' %",
"the specific language governing permissions and # limitations under the",
"[data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id:",
"customer.service_user: merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save = False if",
"for email in cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to",
"key.get() if not city: city = CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email",
"not merchant.whitelisted: merchant.whitelisted = True should_save = True elif merchant.whitelisted:",
"False cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return to cirklo_merchants",
"applicable law or agreed to in writing, software # distributed",
"city.logo_url = data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names",
"= data.signup_logo_url city.signup_names = None city.signup_mail = SignupMails.from_to(data.signup_mail) if data.signup_name_nl",
"False merchant.put() whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered",
"if other_city and other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url",
"_countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else",
"not get_current_session().shop: lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city =",
"import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts import",
"3, translate(language, 'Phone number')) sheet.write(0, 4, translate(language, 'created')) sheet.write(0, 5,",
"in all_services.results: row += 1 sheet.write(row, 0, service.name) sheet.write(row, 1,",
"[] for merchant in cirklo_merchants: if merchant['email'] in cirklo_emails: logging.error('Duplicate",
"= False if cirklo_merchant: if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if",
"sheet.write(0, 1, translate(language, 'Email')) sheet.write(0, 2, translate(language, 'address')) sheet.write(0, 3,",
"raise HttpBadRequestException('City settings aren\\'t fully setup yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check,",
"from xlwt import Worksheet, Workbook, XFStyle from mcfw.cache import invalidate_cache",
"cirklo_dict.get(customer.user_email) should_save = False if cirklo_merchant: if customer.user_email in cirklo_emails:",
"in writing, software # distributed under the License is distributed",
"governing permissions and # limitations under the License. # #",
"if not data.city_id: if other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key =",
"and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user)",
"cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def",
"create_service_identity_user from shop.models import Customer from solutions import translate from",
"[ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in osa_merchants] models = ndb.get_multi(info_keys) for",
"Belgium NV # # Licensed under the Apache License, Version",
"date_format) sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))",
"'shopInfo' in cirklo_merchant if cirklo_merchant else False service_to = CirkloVoucherServiceTO.from_model(merchant,",
"return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments()",
"locale='en_GB') gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-')) content_type",
"CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup') @rest('/common/vouchers/cirklo', 'get') @returns(CirkloCityTO) @arguments() def",
"List from xlwt import Worksheet, Workbook, XFStyle from mcfw.cache import",
"customer in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for merchant in",
"deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() + 'Z'",
"cirklo_merchant: if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted",
"def api_vouchers_get_cirklo_settings(): service_user = users.get_current_user() city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city)",
"fr=data.signup_name_fr) og_info = city.app_info and city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title,",
"Copyright 2020 Green Valley Belgium NV # # Licensed under",
"yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date = datetime.now().isoformat() +",
"3, service.phone_number) sheet.write(row, 4, parse_date(service.creation_date), date_format) sheet.write(row, 5, translate(language, 'Yes')",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License, Version 2.0 (the \"License\"); # you may not use",
"if not merchant.whitelisted: merchant.whitelisted = True merchant.put() elif merchant.whitelisted: merchant.whitelisted",
"# You may obtain a copy of the License at",
"to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data):",
"else None if not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted = True",
"service_info, merchant in zip(models, osa_merchants): customer = customers_dict[merchant.customer_id] if not",
"1, translate(language, 'Email')) sheet.write(0, 2, translate(language, 'address')) sheet.write(0, 3, translate(language,",
"@arguments() def api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings)",
"key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url city.signup_enabled = data.signup_enabled",
"whitelist_date, False, u'OSA signup') to.populate_from_info(service_info, customer) return to else: if",
"else: merchant.denied = True merchant.put() service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"type: CirkloCity if not cirklo_city: raise HttpNotFoundException('No cirklo settings found.')",
"city.signup_logo_url = data.signup_logo_url city.signup_names = None city.signup_mail = SignupMails.from_to(data.signup_mail) if",
"rogerthat.consts import FAST_QUEUE from rogerthat.models import ServiceIdentity from rogerthat.models.settings import",
"merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False to.results.append(",
"solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from solutions.common.restapi.services import",
"{} cirklo_emails = [] for merchant in cirklo_merchants: if merchant['email']",
"'post') @returns(dict) @arguments() def api_export_cirklo_services(): service_user = users.get_current_user() city_sln_settings =",
"\\ WhitelistVoucherServiceTO from solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS",
"not in city_sln_settings.modules: raise HttpForbiddenException() if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user)",
"cirklo_dict[merchant['email']] = merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant] osa_merchants",
"if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only(): city.app_info = info",
"content_type = 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file: book.save(gcs_file)",
"yet.') whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id",
"import ServiceIdentity from rogerthat.models.settings import ServiceInfo from rogerthat.rpc import users",
"'w', content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400) return {",
"import OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email,",
"= get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total = 0 to.results",
"import translate from solutions.common.bizz import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import",
"CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if not cirklo_city: raise HttpNotFoundException('No cirklo",
"= ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get() customer = Customer.get_by_id(merchant.customer_id) # type: Customer if",
"all_services.results: row += 1 sheet.write(row, 0, service.name) sheet.write(row, 1, service.email)",
"customers_dict[merchant.customer_id] if not customer.service_user: merchant.key.delete() continue cirklo_merchant = cirklo_dict.get(customer.user_email) should_save",
"solutions.common.bizz.campaignmonitor import send_smart_email_without_check from solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal import",
"'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings",
"the License for the specific language governing permissions and #",
"else False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info,",
"city_sln_settings = get_solution_settings(service_user) _check_permission(city_sln_settings) all_services = get_cirklo_vouchers_services() if all_services.cursor: raise",
"osa_merchants = [] for merchant in qry: if merchant.service_user_email: osa_merchants.append(merchant)",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"= cirklo_dict.get(customer.user_email) should_save = False if cirklo_merchant: if customer.user_email in",
"merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if",
"if len(city_sln_settings.modules) != 1: _check_is_city(city_sln_settings.service_user) @rest('/common/vouchers/cities', 'get', silent_result=True) @returns([dict]) @arguments(staging=bool)",
"either express or implied. # See the License for the",
"ndb.get_multi(info_keys) for service_info, merchant in zip(models, osa_merchants): customer = customers_dict[merchant.customer_id]",
"CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email",
"data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names = None",
"all_services = get_cirklo_vouchers_services() if all_services.cursor: raise NotImplementedError() book = Workbook(encoding='utf-8')",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"translate(language, 'Yes') if service.merchant_registered else translate(language, 'No')) date = format_datetime(datetime.now(),",
"= CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant language = merchant.get_language() else: merchant",
"users.get_current_user() if not get_current_session().shop: lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission'))",
"_countdown=1, _queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language) if not email_id: raise",
"rogerthat.rpc.users import get_current_session from rogerthat.utils import parse_date from rogerthat.utils.service import",
"@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO) def whitelist_voucher_service(data): city_service_user = users.get_current_user()",
"city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())",
"!= key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email, service_user.email()) city.logo_url = data.logo_url city.signup_enabled =",
"datetime import datetime from google.appengine.ext import ndb, deferred, db from",
"list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments() def get_cirklo_vouchers_services(): city_service_user =",
"if data.signup_name_nl and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl:",
"# type: Customer if data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user)",
"city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity",
"lang = get_solution_settings(service_user).main_language raise HttpForbiddenException(translate(lang, 'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) #",
"city.app_info.to_dict() info = CirkloAppInfo(enabled=data.app_info.enabled, title=data.app_info.title, buttons=data.app_info.buttons) sln_settings = get_solution_settings(service_user) if",
"NotImplementedError() book = Workbook(encoding='utf-8') sheet = book.add_sheet('Cirklo') # type: Worksheet",
"from solutions.common.restapi.services import _check_is_city def _check_permission(city_sln_settings): if SolutionModule.CIRKLO_VOUCHERS not in",
"from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\ SignupMails, CirkloAppInfo from",
"customer for customer in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT) for",
"'created')) sheet.write(0, 5, translate(language, 'merchant_registered')) date_format = XFStyle() date_format.num_format_str =",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted = True should_save = True",
"= 0 to.results = [] to.cursor = None to.more =",
"cirklo_city: return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails",
"whitelist_date, merchant_registered, u'Cirklo signup')) if osa_merchants: customer_to_get = [Customer.create_key(merchant.customer_id) for",
"@arguments() def get_cirklo_vouchers_services(): city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings)",
"in osa_merchants] customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}",
"service_user.email()) city.logo_url = data.logo_url city.signup_enabled = data.signup_enabled city.signup_logo_url = data.signup_logo_url",
"sheet = book.add_sheet('Cirklo') # type: Worksheet language = city_sln_settings.main_language sheet.write(0,",
"None merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False",
"data.city_id: if other_city: other_city.key.delete() return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city",
"other_city.key.delete() return CirkloCityTO.from_model(None) key = CirkloCity.create_key(data.city_id) city = key.get() if",
"% data.city_id) if other_city and other_city.key != key: other_city.key.delete() invalidate_cache(get_city_id_by_service_email,",
"_check_permission(city_sln_settings) cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity if not cirklo_city:",
"cirklo_merchant else False service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup')",
"HttpBadRequestException('City settings aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,",
"typing import List from xlwt import Worksheet, Workbook, XFStyle from",
"from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi import rest",
"solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo import",
"True should_save = True elif merchant.whitelisted: merchant.whitelisted = False should_save",
"if not city: city = CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email !=",
"whitelist_merchant(cirklo_city.city_id, data.email) deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id =",
"{customer.id: customer for customer in db.get(customer_to_get)} info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)",
"osa_merchants.append(merchant) else: cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email'] in",
"= False merchant.put() whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None",
"not cirklo_city: raise HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant = '@'",
"fully setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date =",
"= cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO) @returns(CirkloVoucherServiceTO) @arguments(data=WhitelistVoucherServiceTO)",
"merchant.whitelisted = True else: merchant.denied = True merchant.put() service_info =",
"\"License\"); # you may not use this file except in",
"list_whitelisted_merchants(cirklo_city.city_id) cirklo_dict = {} cirklo_emails = [] for merchant in",
"cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted = True should_save =",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"signup') to.populate_from_info(service_info, customer) return to else: if data.accepted: merchant.whitelisted =",
"u'OSA signup') service_to.populate_from_info(service_info, customer) to.results.append(service_to) for email in cirklo_emails: cirklo_merchant",
"SignupMails.from_to(data.signup_mail) if data.signup_name_nl and data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif",
"city = CirkloCity.get_by_service_email(service_user.email()) return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo', 'put') @returns(CirkloCityTO) @arguments(data=CirkloCityTO) def",
"not in data.id if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get() # type:",
"CirkloAppInfo from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \\ WhitelistVoucherServiceTO from",
"get_solution_settings from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \\ list_whitelisted_merchants, list_cirklo_cities from",
"invalidate_cache from mcfw.consts import REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException,",
"cirklo_emails: cirklo_merchant = cirklo_dict[email] to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant)) return to @rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)",
"# distributed under the License is distributed on an \"AS",
"sheet.write(0, 2, translate(language, 'address')) sheet.write(0, 3, translate(language, 'Phone number')) sheet.write(0,",
"og_info and not sln_settings.ciklo_vouchers_only(): city.app_info = info sln_settings.updates_pending = True",
"# Unless required by applicable law or agreed to in",
"if cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not merchant.whitelisted:",
"= True if should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user)",
"not email_id: raise HttpBadRequestException('City settings aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check,",
"if not is_cirklo_only_merchant: if data.accepted: merchant.whitelisted = True else: merchant.denied",
"import ndb, deferred, db from typing import List from xlwt",
"city_service_user = users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO()",
"users.get_current_user() city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total =",
"continue cirklo_emails.append(merchant['email']) cirklo_dict[merchant['email']] = merchant qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type:",
"= True elif merchant.whitelisted: merchant.whitelisted = False should_save = True",
"aren\\'t fully setup yet.') deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) whitelist_date",
"HttpNotFoundException('No cirklo settings found.') is_cirklo_only_merchant = '@' not in data.id",
"= '@' not in data.id if is_cirklo_only_merchant: merchant = CirkloMerchant.create_key(long(data.id)).get()",
"@arguments(staging=bool) def api_list_cirklo_cities(staging=False): return list_cirklo_cities(staging) @rest('/common/vouchers/services', 'get', silent_result=True) @returns(CirkloVoucherListTO) @arguments()",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"should_save = False if cirklo_merchant: if customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email)",
"data.signup_name_fr: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl, fr=data.signup_name_fr) elif data.signup_name_nl: city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,",
"deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1, _queue=FAST_QUEUE) else: email_id = cirklo_city.get_signup_accepted_mail(language) if",
"import users from rogerthat.rpc.users import get_current_session from rogerthat.utils import parse_date",
"deferred.defer(re_index_map_only, service_identity_user) whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered",
"'no_permission')) other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity if not data.city_id:",
"import List from xlwt import Worksheet, Workbook, XFStyle from mcfw.cache",
"city_sln_settings = get_solution_settings(city_service_user) _check_permission(city_sln_settings) to = CirkloVoucherListTO() to.total = 0",
"4, translate(language, 'created')) sheet.write(0, 5, translate(language, 'merchant_registered')) date_format = XFStyle()",
"You may obtain a copy of the License at #",
"should_save = True if should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only,",
"Customer.get_by_id(merchant.customer_id) # type: Customer if data.accepted: service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only,",
"= 'application/vnd.ms-excel' with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file: book.save(gcs_file) deferred.defer(cloudstorage.delete,",
"\\ list_whitelisted_merchants, list_cirklo_cities from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \\",
"not city: city = CirkloCity(key=key, service_user_email=service_user.email()) elif city.service_user_email != service_user.email():",
"info sln_settings.updates_pending = True sln_settings.put() broadcast_updates_pending(sln_settings) city.put() return CirkloCityTO.from_model(city) @rest('/common/vouchers/cirklo/export',",
"merchant in osa_merchants] models = ndb.get_multi(info_keys) for service_info, merchant in",
"service_identity_user) to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup') to.populate_from_info(service_info, customer)",
"data.signup_enabled city.signup_logo_url = data.signup_logo_url city.signup_names = None city.signup_mail = SignupMails.from_to(data.signup_mail)",
"translate from solutions.common.bizz import SolutionModule, broadcast_updates_pending from solutions.common.bizz.campaignmonitor import send_smart_email_without_check",
"data.accepted: merchant.whitelisted = True else: merchant.denied = True merchant.put() return",
"customer.user_email in cirklo_emails: cirklo_emails.remove(customer.user_email) if not merchant.whitelisted: merchant.whitelisted = True",
"from solutions.common.consts import OCA_FILES_BUCKET from solutions.common.dal import get_solution_settings from solutions.common.integrations.cirklo.cirklo",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email']) if not",
"cirklo settings found.') is_cirklo_only_merchant = '@' not in data.id if",
"True else: merchant.denied = True merchant.put() return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False,",
"merchant.put() whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None merchant_registered =",
"cirklo_merchant = cirklo_dict.get(merchant.data['company']['email']) if cirklo_merchant: if merchant.data['company']['email'] in cirklo_emails: cirklo_emails.remove(merchant.data['company']['email'])",
"from solutions import translate from solutions.common.bizz import SolutionModule, broadcast_updates_pending from",
"import REST_TYPE_TO from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException from mcfw.restapi",
"= CirkloCity.get_by_service_email(city_service_user.email()) if not cirklo_city: return to cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)",
"if should_save: merchant.put() service_identity_user = create_service_identity_user(customer.service_user) deferred.defer(re_index_map_only, service_identity_user) whitelist_date ="
],
[
"f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color(): data",
"def test_grid_addremove(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"f.grid.set_yspacing('auto') f.close() def test_grid_color(): data = np.zeros((16, 16)) f =",
"0.3)) f.close() def test_grid_alpha(): data = np.zeros((16, 16)) f =",
"f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close() def test_grid_alpha(): data =",
"f.close() def test_grid_spacing(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth():",
"matplotlib.use('Agg') import numpy as np from astropy.tests.helper import pytest from",
"= FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close() def test_grid_spacing(): data =",
"import numpy as np from astropy.tests.helper import pytest from ..",
"f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close() def test_grid_alpha(): data = np.zeros((16,",
"astropy.tests.helper import pytest from .. import FITSFigure def test_grid_addremove(): data",
"f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data = np.zeros((16,",
"matplotlib matplotlib.use('Agg') import numpy as np from astropy.tests.helper import pytest",
"import pytest from .. import FITSFigure def test_grid_addremove(): data =",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4,",
"= FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close() def",
"f.remove_grid() f.add_grid() f.close() def test_grid_showhide(): data = np.zeros((16, 16)) f",
"FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data =",
"f.grid.hide() f.grid.show() f.close() def test_grid_spacing(): data = np.zeros((16, 16)) f",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close()",
"f.close() def test_grid_showhide(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344')",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close()",
"f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data = np.zeros((16, 16)) f",
"test_grid_showhide(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.hide()",
"f = FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close()",
"f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data = np.zeros((16, 16))",
"f = FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close() def test_grid_showhide(): data",
"FITSFigure def test_grid_addremove(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"test_grid_color(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_color('black')",
"import matplotlib matplotlib.use('Agg') import numpy as np from astropy.tests.helper import",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show()",
"f.close() def test_grid_color(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"np from astropy.tests.helper import pytest from .. import FITSFigure def",
"= FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close() def test_grid_showhide(): data =",
"pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color():",
"def test_grid_spacing(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick')",
"16)) f = FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close() def test_grid_spacing():",
"16)) f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def",
"FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0,",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0)",
"f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color(): data = np.zeros((16,",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close()",
"pytest from .. import FITSFigure def test_grid_addremove(): data = np.zeros((16,",
"f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto')",
"test_grid_linestyle(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid')",
"def test_grid_alpha(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"from astropy.tests.helper import pytest from .. import FITSFigure def test_grid_addremove():",
"as np from astropy.tests.helper import pytest from .. import FITSFigure",
"test_grid_spacing(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.)",
"f.add_grid() f.close() def test_grid_showhide(): data = np.zeros((16, 16)) f =",
"f.add_grid() f.grid.hide() f.grid.show() f.close() def test_grid_spacing(): data = np.zeros((16, 16))",
".. import FITSFigure def test_grid_addremove(): data = np.zeros((16, 16)) f",
"def test_grid_showhide(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data = np.zeros((16, 16)) f",
"f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.)",
"FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close() def test_grid_alpha():",
"16)) f = FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close() def test_grid_showhide():",
"f.close() def test_grid_linewidth(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linewidth(0) f.grid.set_linewidth(2)",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid()",
"= FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick')",
"f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close() def test_grid_alpha(): data",
"f.add_grid() f.remove_grid() f.add_grid() f.close() def test_grid_showhide(): data = np.zeros((16, 16))",
"f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data = np.zeros((16,",
"def test_grid_linestyle(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color(): data = np.zeros((16, 16))",
"0.4, 0.3)) f.close() def test_grid_alpha(): data = np.zeros((16, 16)) f",
"= FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3)) f.close() def",
"def test_grid_color(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted')",
"f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle():",
"= FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed')",
"16)) f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto')",
"data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3)",
"def test_grid_linewidth(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid()",
"test_grid_linewidth(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linewidth(0)",
"FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close() def test_grid_spacing(): data = np.zeros((16,",
"f.grid.set_color((1.0, 0.4, 0.3)) f.close() def test_grid_alpha(): data = np.zeros((16, 16))",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError):",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close()",
"import FITSFigure def test_grid_addremove(): data = np.zeros((16, 16)) f =",
"f.grid.set_alpha(1.0) f.close() def test_grid_linestyle(): data = np.zeros((16, 16)) f =",
"test_grid_addremove(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.remove_grid()",
"f.close() def test_grid_alpha(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"numpy as np from astropy.tests.helper import pytest from .. import",
"f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color(): data =",
"from .. import FITSFigure def test_grid_addremove(): data = np.zeros((16, 16))",
"f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close()",
"16)) f = FITSFigure(data) f.add_grid() f.grid.set_color('black') f.grid.set_color('#003344') f.grid.set_color((1.0, 0.4, 0.3))",
"16)) f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0) f.grid.set_alpha(0.3) f.grid.set_alpha(1.0) f.close() def",
"f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data = np.zeros((16, 16)) f =",
"FITSFigure(data) f.add_grid() f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data =",
"f.grid.set_linestyle('solid') f.grid.set_linestyle('dashed') f.grid.set_linestyle('dotted') f.close() def test_grid_linewidth(): data = np.zeros((16, 16))",
"FITSFigure(data) f.add_grid() f.remove_grid() f.add_grid() f.close() def test_grid_showhide(): data = np.zeros((16,",
"f.grid.show() f.close() def test_grid_spacing(): data = np.zeros((16, 16)) f =",
"f.close() def test_grid_linestyle(): data = np.zeros((16, 16)) f = FITSFigure(data)",
"pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def test_grid_color(): data = np.zeros((16, 16)) f",
"= np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linewidth(0) f.grid.set_linewidth(2) f.grid.set_linewidth(5)",
"with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError): f.grid.set_yspacing('auto') f.close() def",
"f.add_grid() f.grid.set_xspacing(1.) f.grid.set_xspacing('tick') with pytest.raises(ValueError): f.grid.set_xspacing('auto') f.grid.set_yspacing(2.) f.grid.set_yspacing('tick') with pytest.raises(ValueError):",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_linewidth(0) f.grid.set_linewidth(2) f.grid.set_linewidth(5) f.close()",
"np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close() def",
"f = FITSFigure(data) f.add_grid() f.grid.hide() f.grid.show() f.close() def test_grid_spacing(): data",
"test_grid_alpha(): data = np.zeros((16, 16)) f = FITSFigure(data) f.add_grid() f.grid.set_alpha(0.0)"
],
[
"zip') ) def sync(self): self.readme() self.d.sync() def readme(self): self.readme =",
"self.mk.package self.mk.package \\ // f'SYSLINUX_VER = 6.0.3' # self.mk.src =",
"http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install self.mk.install",
"// self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT",
"{self}\\n' for i in self: ret += i.gen(to, depth +",
"Sec('cfg', pfx=''); self.mk // self.mk.cfg self.mk.cfg \\ // f'PEPS =",
"f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool = Sec('tool', pfx=''); self.mk //",
"= $(shell which python3)' \\ // f'PYT = $(shell which",
"ret += f'{to.tab*depth}{self.value}\\n' for i in self: ret += i.gen(to,",
"ext) self.top = Sec(); self.bot = Sec() self.tab = tab;",
"'.PHONY: install update' self.mk.install \\ // (S('install: $(OS)_install doc') //",
"\\ // (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update') // 'sudo apt",
"`cat apt.txt`') # self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge",
".gitignore README.md apt.txt $(S)' \\ // 'MERGE += .vscode bin",
"// self.watcher // self.assoc) # self.editor = (Sec('editor', pfx='') //",
"doc lib src tmp' self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY: dev')",
"f'{\"DOC\":<11} = $(CWD)/doc' \\ // f'{\"LIB\":<11} = $(CWD)/lib' \\ //",
"self.settings # def multi(key, cmd): return (S('{', '},') // f'\"command\":",
"'<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR = 2020",
"$(Y)' # self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg self.mk.cfg",
"def __floordiv__(self, that): self.nest.append(self.box(that)); return self class Primitive(Object): pass class",
"# self.bin = Dir('bin'); self.d // self.bin def mk(self): self.mk",
"super().__init__(C.__name__) self.clazz = C; self.sup = sup def gen(self, to,",
"']') // task('project', 'install') // task('project', 'update') // task('git', 'dev')",
"= os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE = title if title else",
"= '' if self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n'",
"title else f'{self}' self.ABOUT = about self.AUTHOR = '<NAME>' self.EMAIL",
"= Sec('cfg', pfx=''); self.mk // self.mk.cfg self.mk.cfg \\ // f'PEPS",
"self.mk.cfg \\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all',",
"// '$(MAKE) test' ) self.mk.install \\ // (S('Linux_install Linux_update:', pfx='.PHONY:",
"dump / string def test(self): return self.dump(test=True) def __repr__(self): return",
"Sec('dir', pfx=''); self.mk // self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11} =",
"# self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool self.mk.tool \\",
"sup def gen(self, to, depth=0): ret = S(f'class {self}:', pfx='')",
"test) # subtree return ret def head(self, prefix='', test=False): gid",
"IO(Object): def __init__(self, V): super().__init__(V) self.path = V class Dir(IO):",
"sys' for i in [Object, S, Sec, IO, Dir, File,",
"= $(CURDIR)' \\ // f'{\"BIN\":<11} = $(CWD)/bin' \\ // f'{\"DOC\":<11}",
"f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all', pfx=''); self.mk //",
"f'{\"SRC\":<11} = $(CWD)/src' \\ // f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool",
"f'PYT = $(shell which pytest)' \\ // f'PEP = $(shell",
"\\ // 'import pytest' \\ // f'from {self} import *'",
"// (S('shadow:', pfx='\\n.PHONY: shadow') // 'git push -v' // 'git",
"if self.value is not None: ret += f'{to.tab*depth}{self.value}\\n' for i",
"IO, Dir, File, Meta, Class, Project]: self.py // Class(i) self.py",
"self.pfx else '\\n' if self.nest and self.value is not None:",
"V, ext='', tab=' ' * 4, comment='#'): super().__init__(V + ext)",
"// 'python3 python3-venv' \\ // 'build-essential g++' def vscode(self): self.vscode",
"None: ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n' return ret",
"\\ // f'PYT = $(shell which pytest)' \\ // f'PEP",
"f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\": \"shell\",' // f'\"command\": \"make {cmd}\",'",
"{cmd}\",' // f'\"type\": \"shell\",' // f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\":",
"def py(self): self.py = pyFile(f'{self}'); self.d // self.py self.py \\",
"Sec() self.tab = tab; self.comment = comment def sync(self): with",
"meta') // multi('f12', 'make all') )) # self.files = (Sec()",
"V='.gitignore'): super().__init__(V) self.bot // f'!{self}' class Makefile(File): def __init__(self, V='Makefile'):",
"\\ // f'CURL = curl -L -o' \\ // f'PY",
"// self.apt self.apt \\ // 'git make curl' // 'code",
"# self.settings \\ // (S('{', '}') // self.multi // self.files",
"jsonFile('settings'); self.vscode // self.settings # def multi(key, cmd): return (S('{',",
"if title else f'{self}' self.ABOUT = about self.AUTHOR = '<NAME>'",
"Sec, IO, Dir, File, Meta, Class, Project]: self.py // Class(i)",
"\\ // (S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install =",
"\\ // f'P += config.py' \\ // f'S += $(Y)'",
") self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY: shadow') // 'git push",
"lib src tmp' self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY: dev') //",
"else '\\n' return ret class IO(Object): def __init__(self, V): super().__init__(V)",
"// self.py self.py \\ // 'import os, sys' for i",
"S('Project().sync()', pfx='') def test(self): self.test = pyFile(f'test_{self}'); self.d // self.test",
"'build-essential g++' def vscode(self): self.vscode = Dir('.vscode'); self.d // self.vscode",
"'\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}'",
"self.AUTHOR = '<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR",
"\"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi =",
"settings(self): self.settings = jsonFile('settings'); self.vscode // self.settings # def multi(key,",
"-u `cat apt.txt`') # self.mk.merge = Sec('merge', pfx=''); self.mk //",
"self.val() raise TypeError(['__format__', spec]) def tag(self): return self.__class__.__name__.lower() def val(self):",
"os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE = title if title else f'{self}'",
"that if isinstance(that, str): return S(that) raise TypeError(['box', type(that), that])",
"self.end is not None: ret += f'{to.tab*depth}{self.end}\\n' if self.sfx is",
"= V self.nest = [] def box(self, that): if isinstance(that,",
"{{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi = \\ (Sec('multi') //",
"$(S)' \\ // 'MERGE += .vscode bin doc lib src",
".vscode bin doc lib src tmp' self.mk.merge \\ // (S('dev:',",
"that): if isinstance(that, Object): return that if isinstance(that, str): return",
"// f'#  `{self}`' // f'## {self.TITLE}' self.readme \\ //",
"= File('README', '.md'); self.d // self.readme self.readme \\ // f'#",
"push -v' // 'git checkout $(SHADOW)' ) self.mk.merge \\ //",
"'git push -v' // 'git checkout $(SHADOW)' ) self.mk.merge \\",
"self.value = V self.nest = [] def box(self, that): if",
"reserved' self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs()",
"\\ // 'def test_any(): assert True' def dirs(self): self.d =",
"self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx else",
"title='ViZual language environment', about=''' * object (hyper)graph interpreter ''' ).sync()",
"Sec(S): def gen(self, to, depth=0): ret = '' if self.pfx",
"\\ // f'{\"SRC\":<11} = $(CWD)/src' \\ // f'{\"TMP\":<11} = $(CWD)/tmp'",
"class Makefile(File): def __init__(self, V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File): def",
"pfx='.PHONY: Linux_install Linux_update') // 'sudo apt update' // 'sudo apt",
"shadow') // 'git push -v' // 'git checkout $(SHADOW)' )",
"task('git', 'shadow') )) def src(self): self.py() self.test() self.config() def config(self):",
"-L -o' \\ // f'PY = $(shell which python3)' \\",
"$(SHADOW)' ) self.mk.merge \\ // (S('release:', pfx='\\n.PHONY: release') ) self.mk.merge",
"$(CWD)/doc' \\ // f'{\"LIB\":<11} = $(CWD)/lib' \\ // f'{\"SRC\":<11} =",
"// (S('\"multiCommand.commands\": [', '],') // multi('f11', 'make meta') // multi('f12',",
"ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n' return ret class",
"pyFile('config'); self.d // self.config self.config \\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\"",
"def __iter__(self): return iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that)); return self",
"self.GITHUB = 'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE = 'All rights",
"= Sec('merge', pfx=''); self.mk // self.mk.merge self.mk.merge \\ // 'SHADOW",
"import os, sys class Object: ## @name constructor def __init__(self,",
"'All rights reserved' self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}'",
"// self.mk.cfg self.mk.cfg \\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all",
"__init__(self, V, ext='', tab=' ' * 4, comment='#'): super().__init__(V +",
"+ '\\t' * depth ret = pad(depth) + self.head(prefix, test)",
"Sec('tool', pfx=''); self.mk // self.mk.tool self.mk.tool \\ // f'CURL =",
"'update') // task('git', 'dev') // task('git', 'shadow') )) def src(self):",
"ret += f'{to.tab*depth}{self.pfx}\\n' if self.value is not None: ret +=",
"Sec('doc', pfx=''); self.mk // self.mk.doc self.mk.doc \\ // S('doc: doc/pyMorphic.pdf',",
"V = os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE = title if title",
"self.mk.install \\ // (S('update: $(OS)_update doc') // '$(MAKE) test' )",
"self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY: zip') ) def sync(self): self.readme()",
"__floordiv__(self, that): assert isinstance(that, IO) that.path = f'{self.path}/{that.path}' return super().__floordiv__(that)",
"task('project', 'update') // task('git', 'dev') // task('git', 'shadow') )) def",
"self.tasks = jsonFile('tasks'); self.vscode // self.tasks def task(clazz, cmd): return",
"not spec: return self.val() raise TypeError(['__format__', spec]) def tag(self): return",
"Meta(Object): pass class Class(Meta): def __init__(self, C, sup=[]): assert callable(C)",
"curl' // 'code meld' \\ // 'python3 python3-venv' \\ //",
"// '*.swp' // '*.log'; self.giti.top.sfx = '' self.giti // f'/{self}/'",
"Makefile(File): def __init__(self, V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File): def __init__(self,",
"(Sec('multi') // (S('\"multiCommand.commands\": [', '],') // multi('f11', 'make meta') //",
"'.txt'); self.d // self.apt self.apt \\ // 'git make curl'",
"self.config \\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ // f\"{'HOST':<11} =",
"is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n'",
"ext='.py'): super().__init__(V, ext) class jsonFile(File): def __init__(self, V, ext='.json', comment='//'):",
"spec: return self.val() raise TypeError(['__format__', spec]) def tag(self): return self.__class__.__name__.lower()",
"def __init__(self, V=None, title='', about=''): if not V: V =",
"/ {self}\\n' if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n'",
"jsonFile('tasks'); self.vscode // self.tasks def task(clazz, cmd): return (S('{', '},')",
"self.mk.src self.mk.src \\ // f'Y += $(MODULE).py test_$(MODULE).py' \\ //",
"ret += f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S): def gen(self, to,",
"[] def box(self, that): if isinstance(that, Object): return that if",
"V self.nest = [] def box(self, that): if isinstance(that, Object):",
"pfx=''); self.mk // self.mk.src self.mk.src \\ // f'Y += $(MODULE).py",
"'$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all \\ // (S('test: $(Y)', pfx='\\n.PHONY:",
"// f'## {self.TITLE}' self.readme \\ // '' // self.COPYRIGHT //",
"depth ret = pad(depth) + self.head(prefix, test) # subtree return",
"multi('f12', 'make all') )) # self.files = (Sec() // f'\"{self}/**\":true,'",
"os, sys class Object: ## @name constructor def __init__(self, V):",
"README.md apt.txt $(S)' \\ // 'MERGE += .vscode bin doc",
"self.watcher // self.assoc) # self.editor = (Sec('editor', pfx='') // '\"editor.tabSize\":",
"f'S += $(Y)' # self.mk.cfg = Sec('cfg', pfx=''); self.mk //",
"'sudo apt update' // 'sudo apt install -u `cat apt.txt`')",
"dump(self, cycle=[], depth=0, prefix='', test=False): # head def pad(depth): return",
"def __init__(self, V, ext='', tab=' ' * 4, comment='#'): super().__init__(V",
"ret = S(f'class {self}:', pfx='') // 'pass' return ret.gen(to, depth)",
"self.py = pyFile(f'{self}'); self.d // self.py self.py \\ // 'import",
"else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if not",
"not None: ret += f'{to.tab*depth}{self.value}\\n' for i in self: ret",
"apt(self): self.apt = File('apt', '.txt'); self.d // self.apt self.apt \\",
"f'{\"BIN\":<11} = $(CWD)/bin' \\ // f'{\"DOC\":<11} = $(CWD)/doc' \\ //",
"apt.txt`') # self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge self.mk.merge",
"'sudo apt install -u `cat apt.txt`') # self.mk.merge = Sec('merge',",
"Sec('merge', pfx=''); self.mk // self.mk.merge self.mk.merge \\ // 'SHADOW ?=",
"\\ // f'#  `{self}`' // f'## {self.TITLE}' self.readme \\",
"\\ // 'import os, sys' for i in [Object, S,",
"i in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def __init__(self, V='.gitignore'):",
"self.pfx = pfx; self.sfx = sfx def gen(self, to, depth=0):",
"+= f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n' if self.nest and self.value",
"$(notdir $(CURDIR))' \\ // f'{\"OS\":<11} = $(shell uname -s)' \\",
"f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ // f\"{'HOST':<11} = '127..0.0.1'\" \\ //",
"-l)' # self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir self.mk.dir",
"(S('release:', pfx='\\n.PHONY: release') ) self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY: zip')",
"// self.editor) def tasks(self): self.tasks = jsonFile('tasks'); self.vscode // self.tasks",
"apt install -u `cat apt.txt`') # self.mk.merge = Sec('merge', pfx='');",
"// f'CURL = curl -L -o' \\ // f'PY =",
"= Dir('bin'); self.d // self.bin def mk(self): self.mk = Makefile();",
"-s)' \\ // f'{\"CORES\":<11} = $(shell grep processor /proc/cpuinfo |",
"def readme(self): self.readme = File('README', '.md'); self.d // self.readme self.readme",
"def __init__(self, V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File): def __init__(self, V,",
"autopep8)' # self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package self.mk.package",
"\\ // f'S += $(Y)' # self.mk.cfg = Sec('cfg', pfx='');",
"release') ) self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY: zip') ) def",
"// (S('test: $(Y)', pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py') # self.mk.rule",
"i in self: i.sync() class File(IO): def __init__(self, V, ext='',",
"{self.YEAR} {self.LICENSE}' # self.dirs() self.mk() self.src() self.vscode() self.apt() def apt(self):",
"+= f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n' return ret class IO(Object):",
"'SHADOW ?= ponymuck' self.mk.merge \\ // 'MERGE = Makefile .gitignore",
"{cmd}\",' // f'\"problemMatcher\": []' ) self.tasks \\ // (S('{', '}')",
"self.bot = Sec() self.tab = tab; self.comment = comment def",
"head(self, prefix='', test=False): gid = '' if test else f'",
"f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n' return ret class IO(Object): def",
"ret class Sec(S): def gen(self, to, depth=0): ret = ''",
"pfx=''); self.mk // self.mk.package self.mk.package \\ // f'SYSLINUX_VER = 6.0.3'",
"// f'PYT = $(shell which pytest)' \\ // f'PEP =",
"which pytest)' \\ // f'PEP = $(shell which autopep8)' #",
"raise TypeError(['box', type(that), that]) ## @name dump / string def",
"self.mk // self.mk.install self.mk.install // '.PHONY: install update' self.mk.install \\",
"return self.__class__.__name__.lower() def val(self): return f'{self.value}' ## @name operator def",
"\\\\ {self}\\n' for i in self: ret += i.gen(to, depth",
"f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\":",
"self.mk.all self.mk.all \\ // (S('meta: $(Y)', pfx='.PHONY: meta') // '$(MAKE)",
"self.giti self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx =",
"/proc/cpuinfo | wc -l)' # self.mk.dir = Sec('dir', pfx=''); self.mk",
"= Dir('.vscode'); self.d // self.vscode self.settings() self.tasks() def settings(self): self.settings",
"(S('dev:', pfx='\\n.PHONY: dev') // 'git push -v' // 'git checkout",
"= end; self.pfx = pfx; self.sfx = sfx def gen(self,",
"$(MODULE).py test_$(MODULE).py' \\ // f'P += config.py' \\ // f'S",
"V: V = os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE = title if",
"super().__init__(V) # self.TITLE = title if title else f'{self}' self.ABOUT",
"self.readme \\ // f'#  `{self}`' // f'## {self.TITLE}' self.readme",
"self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' return ret class",
"self.mk.merge self.mk.merge \\ // 'SHADOW ?= ponymuck' self.mk.merge \\ //",
"self.path = V class Dir(IO): def __floordiv__(self, that): assert isinstance(that,",
"'*.log'; self.giti.top.sfx = '' self.giti // f'/{self}/' // '/__pycache__/' self.giti.bot.pfx",
"// 'sudo apt update' // 'sudo apt install -u `cat",
"i in self: ret += i.gen(to, depth + 0) if",
"comment def sync(self): with open(self.path, 'w') as F: F.write(self.top.gen(self)) for",
"(Sec() // (S('\"files.associations\": {', '},'))) self.files = (Sec('files', pfx='') //",
"self.config self.config \\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ // f\"{'HOST':<11}",
"# self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir self.mk.dir \\",
"// self.mk.tool self.mk.tool \\ // f'CURL = curl -L -o'",
"4, comment='#'): super().__init__(V + ext) self.top = Sec(); self.bot =",
"pfx='\\n.PHONY: zip') ) def sync(self): self.readme() self.d.sync() def readme(self): self.readme",
"f'PEP = $(shell which autopep8)' # self.mk.package = Sec('package', pfx='');",
"// (S('dev:', pfx='\\n.PHONY: dev') // 'git push -v' // 'git",
"+= f'{to.tab*depth}{self.value}\\n' for i in self: ret += i.gen(to, depth",
"self.clazz = C; self.sup = sup def gen(self, to, depth=0):",
"(S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update') // 'sudo apt update' //",
"Object: ## @name constructor def __init__(self, V): self.value = V",
"else f'{self}' self.ABOUT = about self.AUTHOR = '<NAME>' self.EMAIL =",
"'' self.giti // f'/{self}/' // '/__pycache__/' self.giti.bot.pfx = '' #",
"self.mk // self.mk.merge self.mk.merge \\ // 'SHADOW ?= ponymuck' self.mk.merge",
"// (S('\"tasks\": [', ']') // task('project', 'install') // task('project', 'update')",
"ext) class jsonFile(File): def __init__(self, V, ext='.json', comment='//'): super().__init__(V, ext,",
"--ignore=$(PEPS) --in-place $?') self.mk.all \\ // (S('test: $(Y)', pfx='\\n.PHONY: test')",
"def vscode(self): self.vscode = Dir('.vscode'); self.d // self.vscode self.settings() self.tasks()",
"// self.files // self.editor) def tasks(self): self.tasks = jsonFile('tasks'); self.vscode",
"// 'def test_any(): assert True' def dirs(self): self.d = Dir(f'{self}');",
"S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:') // '$(CURL)",
"f'{\"CWD\":<11} = $(CURDIR)' \\ // f'{\"BIN\":<11} = $(CWD)/bin' \\ //",
"i in [Object, S, Sec, IO, Dir, File, Meta, Class,",
"+= f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S): def gen(self, to, depth=0):",
"self.pfx else '\\n' return ret class IO(Object): def __init__(self, V):",
"pfx=''); self.mk // self.mk.var self.mk.var \\ // f'{\"MODULE\":<11} = $(notdir",
"checkout $@' // 'git checkout $(SHADOW) -- $(MERGE)' ) self.mk.merge",
"'127..0.0.1'\" \\ // f\"{'PORT':<11} = 12345\" def py(self): self.py =",
"as F: F.write(self.top.gen(self)) for i in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class",
"self.mk // self.mk.var self.mk.var \\ // f'{\"MODULE\":<11} = $(notdir $(CURDIR))'",
"comment='//'): super().__init__(V, ext, comment=comment) class Meta(Object): pass class Class(Meta): def",
"{self.TITLE}' self.readme \\ // '' // self.COPYRIGHT // '' //",
"## @name constructor def __init__(self, V): self.value = V self.nest",
"def __init__(self, V, ext='.json', comment='//'): super().__init__(V, ext, comment=comment) class Meta(Object):",
"not None: ret += f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S): def",
"// 'pass' return ret.gen(to, depth) class Project(Meta): def __init__(self, V=None,",
"about=''): if not V: V = os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE",
"// f'\"{self}/**\":true,' ) self.exclude = \\ (Sec() // (S('\"files.exclude\": {',",
"self.mk // self.mk.cfg self.mk.cfg \\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702' #",
"= \\ (Sec() // (S('\"files.watcherExclude\": {', '},') // self.files)) self.assoc",
"1) if self.end is not None: ret += f'{to.tab*depth}{self.end}\\n' if",
"super().__init__(V) self.path = V class Dir(IO): def __floordiv__(self, that): assert",
"self.vscode // self.settings # def multi(key, cmd): return (S('{', '},')",
"$(MERGE)' ) self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY: shadow') // 'git",
"ret += i.gen(to, depth + 0) if self.nest and self.value",
"self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc self.mk.doc \\ //",
"install update' self.mk.install \\ // (S('install: $(OS)_install doc') // '$(MAKE)",
"Dir(IO): def __floordiv__(self, that): assert isinstance(that, IO) that.path = f'{self.path}/{that.path}'",
"\\ // 'MERGE += .vscode bin doc lib src tmp'",
" `{self}`' // f'## {self.TITLE}' self.readme \\ // '' //",
"\\ (Sec() // (S('\"files.associations\": {', '},'))) self.files = (Sec('files', pfx='')",
"S(that) raise TypeError(['box', type(that), that]) ## @name dump / string",
"def config(self): self.config = pyFile('config'); self.d // self.config self.config \\",
"\\ // f'{\"DOC\":<11} = $(CWD)/doc' \\ // f'{\"LIB\":<11} = $(CWD)/lib'",
"$(shell which pytest)' \\ // f'PEP = $(shell which autopep8)'",
"## @name operator def __iter__(self): return iter(self.nest) def __floordiv__(self, that):",
"self: i.sync() class File(IO): def __init__(self, V, ext='', tab=' '",
"(S('install: $(OS)_install doc') // '$(MAKE) test' ) self.mk.install \\ //",
"self.dump(test=False) def dump(self, cycle=[], depth=0, prefix='', test=False): # head def",
"File('apt', '.txt'); self.d // self.apt self.apt \\ // 'git make",
"'make all') )) # self.files = (Sec() // f'\"{self}/**\":true,' )",
"\\ // '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}'",
"// (S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install',",
"pfx='\\n.PHONY: shadow') // 'git push -v' // 'git checkout $(SHADOW)'",
"// f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all', pfx=''); self.mk",
"= $(shell grep processor /proc/cpuinfo | wc -l)' # self.mk.dir",
"// self.exclude // self.watcher // self.assoc) # self.editor = (Sec('editor',",
"pfx='') def test(self): self.test = pyFile(f'test_{self}'); self.d // self.test self.test",
"class Class(Meta): def __init__(self, C, sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz",
"\\ // f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\ // f'{\"OS\":<11} =",
"// '$(MAKE) test' ) self.mk.install \\ // (S('update: $(OS)_update doc')",
"self.test self.test \\ // 'import pytest' \\ // f'from {self}",
"def dirs(self): self.d = Dir(f'{self}'); self.giti = giti(); self.d //",
"f'#  `{self}`' // f'## {self.TITLE}' self.readme \\ // ''",
"\\ // 'SHADOW ?= ponymuck' self.mk.merge \\ // 'MERGE =",
"= sup def gen(self, to, depth=0): ret = S(f'class {self}:',",
"__repr__(self): return self.dump(test=False) def dump(self, cycle=[], depth=0, prefix='', test=False): #",
"g++' def vscode(self): self.vscode = Dir('.vscode'); self.d // self.vscode self.settings()",
"'},') // self.files)) self.watcher = \\ (Sec() // (S('\"files.watcherExclude\": {',",
"self.d // self.py self.py \\ // 'import os, sys' for",
"self.config = pyFile('config'); self.d // self.config self.config \\ // f\"{'SECURE_KEY':<11}",
"self.py // Class(i) self.py // Class(Primitive, [Object]) self.py \\ //",
"dirs(self): self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti",
"// self.mk # self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var",
"callable(C) super().__init__(C.__name__) self.clazz = C; self.sup = sup def gen(self,",
"Sec('install', pfx=''); self.mk // self.mk.install self.mk.install // '.PHONY: install update'",
"self.mk.install self.mk.install // '.PHONY: install update' self.mk.install \\ // (S('install:",
"$(shell which autopep8)' # self.mk.package = Sec('package', pfx=''); self.mk //",
"= $(CWD)/lib' \\ // f'{\"SRC\":<11} = $(CWD)/src' \\ // f'{\"TMP\":<11}",
"self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY: shadow') // 'git push -v'",
"self.bot // f'!{self}' class Makefile(File): def __init__(self, V='Makefile'): super().__init__(V, tab='\\t')",
"if not spec: return self.val() raise TypeError(['__format__', spec]) def tag(self):",
"iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that)); return self class Primitive(Object): pass",
"\"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi = \\ (Sec('multi') // (S('\"multiCommand.commands\":",
"= Sec('var', pfx=''); self.mk // self.mk.var self.mk.var \\ // f'{\"MODULE\":<11}",
"return that if isinstance(that, str): return S(that) raise TypeError(['box', type(that),",
"pfx='') // 'pass' return ret.gen(to, depth) class Project(Meta): def __init__(self,",
"f'{to.tab*depth}{self.value}\\n' for i in self: ret += i.gen(to, depth +",
"return '\\n' + '\\t' * depth ret = pad(depth) +",
"{self}\\n' if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' if",
"// S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:') //",
"giti(File): def __init__(self, V='.gitignore'): super().__init__(V) self.bot // f'!{self}' class Makefile(File):",
"depth + 1) if self.end is not None: ret +=",
"cmd): return (S('{', '},') // f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\":",
"# self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package self.mk.package \\",
"\\ // (S('{', '}') // self.multi // self.files // self.editor)",
"'make meta') // multi('f12', 'make all') )) # self.files =",
"which autopep8)' # self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package",
"def __init__(self, V): self.value = V self.nest = [] def",
"// self.ABOUT Project( title='ViZual language environment', about=''' * object (hyper)graph",
"if self.pfx else '\\n' if self.nest and self.value is not",
"super().__init__(V, ext, comment=comment) class Meta(Object): pass class Class(Meta): def __init__(self,",
"self.ABOUT Project( title='ViZual language environment', about=''' * object (hyper)graph interpreter",
"$(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all \\ // (S('test:",
"and self.value is not None: ret += f'{to.tab*depth}{to.comment} / {self}\\n'",
"test' ) self.mk.install \\ // (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update')",
"\\ // 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \\",
"all') )) # self.files = (Sec() // f'\"{self}/**\":true,' ) self.exclude",
"'}') // self.multi // self.files // self.editor) def tasks(self): self.tasks",
"<{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs() self.mk() self.src() self.vscode() self.apt() def",
"is not None: ret += f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx",
"= $(CWD)/doc' \\ // f'{\"LIB\":<11} = $(CWD)/lib' \\ // f'{\"SRC\":<11}",
"$(OS)_install doc') // '$(MAKE) test' ) self.mk.install \\ // (S('update:",
"ext='.json', comment='//'): super().__init__(V, ext, comment=comment) class Meta(Object): pass class Class(Meta):",
"// f\"{'HOST':<11} = '127..0.0.1'\" \\ // f\"{'PORT':<11} = 12345\" def",
"self.nest and self.value is not None: ret += f'{to.tab*depth}{to.comment} \\\\",
"self.bin = Dir('bin'); self.d // self.bin def mk(self): self.mk =",
"'.md'); self.d // self.readme self.readme \\ // f'#  `{self}`'",
"self.nest = [] def box(self, that): if isinstance(that, Object): return",
"def __floordiv__(self, that): assert isinstance(that, IO) that.path = f'{self.path}/{that.path}' return",
"f'SYSLINUX_VER = 6.0.3' # self.mk.src = Sec('src', pfx=''); self.mk //",
"tab; self.comment = comment def sync(self): with open(self.path, 'w') as",
"self.YEAR = 2020 self.LICENSE = 'All rights reserved' self.COPYRIGHT =",
"f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\": []' ) self.tasks \\ //",
"= Makefile(); self.d // self.mk # self.mk.var = Sec('var', pfx='');",
"'shadow') )) def src(self): self.py() self.test() self.config() def config(self): self.config",
"self.test = pyFile(f'test_{self}'); self.d // self.test self.test \\ // 'import",
")) # self.files = (Sec() // f'\"{self}/**\":true,' ) self.exclude =",
"$?') self.mk.all \\ // (S('test: $(Y)', pfx='\\n.PHONY: test') // '$(PYT)",
"self.mk // self.mk.doc self.mk.doc \\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc')",
"\\ // f\"{'HOST':<11} = '127..0.0.1'\" \\ // f\"{'PORT':<11} = 12345\"",
"f\"{'PORT':<11} = 12345\" def py(self): self.py = pyFile(f'{self}'); self.d //",
"Sec('var', pfx=''); self.mk // self.mk.var self.mk.var \\ // f'{\"MODULE\":<11} =",
"f'{\"CORES\":<11} = $(shell grep processor /proc/cpuinfo | wc -l)' #",
"ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i in self: ret",
"V, ext='.py'): super().__init__(V, ext) class jsonFile(File): def __init__(self, V, ext='.json',",
"// self.mk.package self.mk.package \\ // f'SYSLINUX_VER = 6.0.3' # self.mk.src",
"Project( title='ViZual language environment', about=''' * object (hyper)graph interpreter '''",
"for i in self: ret += i.gen(to, depth + 0)",
"super().__init__(V) self.bot // f'!{self}' class Makefile(File): def __init__(self, V='Makefile'): super().__init__(V,",
"+= i.gen(to, depth + 0) if self.nest and self.value is",
"self.bin def mk(self): self.mk = Makefile(); self.d // self.mk #",
"= sfx def gen(self, to, depth=0): ret = '' if",
")) def src(self): self.py() self.test() self.config() def config(self): self.config =",
"self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge self.mk.merge \\ //",
"None: ret += f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx is not",
"// self.vscode self.settings() self.tasks() def settings(self): self.settings = jsonFile('settings'); self.vscode",
"'},') // f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\": \"shell\",' // f'\"command\":",
"__format__(self, spec=''): if not spec: return self.val() raise TypeError(['__format__', spec])",
"// '\"workbench.tree.indent\": 32,' ) # self.settings \\ // (S('{', '}')",
"// (S('{', '}') // '\"version\": \"2.0.0\",' // (S('\"tasks\": [', ']')",
"self.multi // self.files // self.editor) def tasks(self): self.tasks = jsonFile('tasks');",
"ret class IO(Object): def __init__(self, V): super().__init__(V) self.path = V",
"// self.tasks def task(clazz, cmd): return (S('{', '},') // f'\"label\":",
"(S('\"tasks\": [', ']') // task('project', 'install') // task('project', 'update') //",
"None: ret += f'{to.tab*depth}{self.value}\\n' for i in self: ret +=",
"// self.assoc) # self.editor = (Sec('editor', pfx='') // '\"editor.tabSize\": 4,'",
"self.ABOUT = about self.AUTHOR = '<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB",
"not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n' if",
"(S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install', pfx='');",
"# self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule # self.mk.doc",
"'import os, sys' for i in [Object, S, Sec, IO,",
"+ 0) if self.nest and self.value is not None: ret",
"# self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge self.mk.merge \\",
"depth=0): ret = S(f'class {self}:', pfx='') // 'pass' return ret.gen(to,",
"\\ // f'{\"CWD\":<11} = $(CURDIR)' \\ // f'{\"BIN\":<11} = $(CWD)/bin'",
"'code meld' \\ // 'python3 python3-venv' \\ // 'build-essential g++'",
"f'!{self}' class Makefile(File): def __init__(self, V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File):",
"self.mk // self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11} = $(CURDIR)' \\",
"self.giti // f'/{self}/' // '/__pycache__/' self.giti.bot.pfx = '' # self.bin",
"super().__init__(V, tab='\\t') class pyFile(File): def __init__(self, V, ext='.py'): super().__init__(V, ext)",
"[', '],') // multi('f11', 'make meta') // multi('f12', 'make all')",
"))) self.multi = \\ (Sec('multi') // (S('\"multiCommand.commands\": [', '],') //",
"(S('{', '},') // f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\": \"shell\",' //",
"= about self.AUTHOR = '<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB =",
"pass class Class(Meta): def __init__(self, C, sup=[]): assert callable(C) super().__init__(C.__name__)",
"'git push -v' // 'git checkout $@' // 'git checkout",
"constructor def __init__(self, V): self.value = V self.nest = []",
"title='', about=''): if not V: V = os.getcwd().split('/')[-1] super().__init__(V) #",
"self.py \\ // S('Project().sync()', pfx='') def test(self): self.test = pyFile(f'test_{self}');",
"def pad(depth): return '\\n' + '\\t' * depth ret =",
"depth) class Project(Meta): def __init__(self, V=None, title='', about=''): if not",
"f'/{self}/' // '/__pycache__/' self.giti.bot.pfx = '' # self.bin = Dir('bin');",
"// f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool = Sec('tool', pfx=''); self.mk",
"type(that), that]) ## @name dump / string def test(self): return",
"test(self): return self.dump(test=True) def __repr__(self): return self.dump(test=False) def dump(self, cycle=[],",
"# subtree return ret def head(self, prefix='', test=False): gid =",
"def sync(self): with open(self.path, 'w') as F: F.write(self.top.gen(self)) for i",
"// f'{\"SRC\":<11} = $(CWD)/src' \\ // f'{\"TMP\":<11} = $(CWD)/tmp' #",
"{self.GITHUB}/{self}' self.readme // self.ABOUT Project( title='ViZual language environment', about=''' *",
"= $(CWD)/bin' \\ // f'{\"DOC\":<11} = $(CWD)/doc' \\ // f'{\"LIB\":<11}",
"\\ // f'SYSLINUX_VER = 6.0.3' # self.mk.src = Sec('src', pfx='');",
"-v' // 'git checkout $@' // 'git checkout $(SHADOW) --",
"make curl' // 'code meld' \\ // 'python3 python3-venv' \\",
"self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var self.mk.var \\ //",
"`{self}`' // f'## {self.TITLE}' self.readme \\ // '' // self.COPYRIGHT",
"// f'{\"OS\":<11} = $(shell uname -s)' \\ // f'{\"CORES\":<11} =",
"checkout $(SHADOW)' ) self.mk.merge \\ // (S('release:', pfx='\\n.PHONY: release') )",
"if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' return ret",
"pass for i in self: i.sync() class File(IO): def __init__(self,",
"= Sec(); self.bot = Sec() self.tab = tab; self.comment =",
"self.apt self.apt \\ // 'git make curl' // 'code meld'",
"if self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx",
"Class(Meta): def __init__(self, C, sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz =",
"// S('Project().sync()', pfx='') def test(self): self.test = pyFile(f'test_{self}'); self.d //",
"None: ret += f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S): def gen(self,",
"self.mk.merge \\ // 'MERGE = Makefile .gitignore README.md apt.txt $(S)'",
"= E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all",
"// self.files)) self.watcher = \\ (Sec() // (S('\"files.watcherExclude\": {', '},')",
"E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all self.mk.all",
"self.mk.doc \\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\ //",
"def __repr__(self): return self.dump(test=False) def dump(self, cycle=[], depth=0, prefix='', test=False):",
"__init__(self, C, sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz = C; self.sup",
"\\ // (S('zip:', pfx='\\n.PHONY: zip') ) def sync(self): self.readme() self.d.sync()",
"self.readme \\ // '' // self.COPYRIGHT // '' // f'github:",
"// 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \\ //",
"f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs() self.mk() self.src() self.vscode()",
"def sync(self): self.readme() self.d.sync() def readme(self): self.readme = File('README', '.md');",
"// '$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all \\",
"' * 4, comment='#'): super().__init__(V + ext) self.top = Sec();",
"tmp' self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY: dev') // 'git push",
"self.comment = comment def sync(self): with open(self.path, 'w') as F:",
"'\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,' ) # self.settings \\ //",
"Object): return that if isinstance(that, str): return S(that) raise TypeError(['box',",
"// (S('\"files.watcherExclude\": {', '},') // self.files)) self.assoc = \\ (Sec()",
"\\ // (S('test: $(Y)', pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py') #",
"'git checkout $(SHADOW) -- $(MERGE)' ) self.mk.merge \\ // (S('shadow:',",
"-o' \\ // f'PY = $(shell which python3)' \\ //",
"vscode(self): self.vscode = Dir('.vscode'); self.d // self.vscode self.settings() self.tasks() def",
"doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') #",
"def __init__(self, V, ext='.py'): super().__init__(V, ext) class jsonFile(File): def __init__(self,",
"// f'SYSLINUX_VER = 6.0.3' # self.mk.src = Sec('src', pfx=''); self.mk",
"IO) that.path = f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self): try: os.mkdir(self.path)",
"__init__(self, V=None, end=None, pfx=None, sfx=None): super().__init__(V) self.end = end; self.pfx",
"= f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self): try: os.mkdir(self.path) except FileExistsError:",
"= title if title else f'{self}' self.ABOUT = about self.AUTHOR",
") self.mk.install \\ // (S('update: $(OS)_update doc') // '$(MAKE) test'",
"self.files // self.editor) def tasks(self): self.tasks = jsonFile('tasks'); self.vscode //",
"self.__class__.__name__.lower() def val(self): return f'{self.value}' ## @name operator def __iter__(self):",
"| wc -l)' # self.mk.dir = Sec('dir', pfx=''); self.mk //",
"class Object: ## @name constructor def __init__(self, V): self.value =",
"import *' \\ // 'def test_any(): assert True' def dirs(self):",
"// (S('release:', pfx='\\n.PHONY: release') ) self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY:",
"__init__(self, V='.gitignore'): super().__init__(V) self.bot // f'!{self}' class Makefile(File): def __init__(self,",
"i.sync() class File(IO): def __init__(self, V, ext='', tab=' ' *",
"test else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if",
"= '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE =",
"(Sec() // (S('\"files.exclude\": {', '},') // self.files)) self.watcher = \\",
"= giti(); self.d // self.giti self.giti.top // '*~' // '*.swp'",
"isinstance(that, IO) that.path = f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self): try:",
"// f'/{self}/' // '/__pycache__/' self.giti.bot.pfx = '' # self.bin =",
"isinstance(that, str): return S(that) raise TypeError(['box', type(that), that]) ## @name",
"* 4, comment='#'): super().__init__(V + ext) self.top = Sec(); self.bot",
"// f'{\"CORES\":<11} = $(shell grep processor /proc/cpuinfo | wc -l)'",
"ret.gen(to, depth) class Project(Meta): def __init__(self, V=None, title='', about=''): if",
"// f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ // f\"{'HOST':<11} = '127..0.0.1'\" \\",
"def mk(self): self.mk = Makefile(); self.d // self.mk # self.mk.var",
"if not V: V = os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE =",
"f'{\"OS\":<11} = $(shell uname -s)' \\ // f'{\"CORES\":<11} = $(shell",
"Linux_install Linux_update') // 'sudo apt update' // 'sudo apt install",
") self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY: zip') ) def sync(self):",
"\"make {cmd}\",' // f'\"problemMatcher\": []' ) self.tasks \\ // (S('{',",
"self.mk.all \\ // (S('test: $(Y)', pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py')",
"// f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\ // f'{\"OS\":<11} = $(shell",
"@{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if not spec: return",
"self.multi = \\ (Sec('multi') // (S('\"multiCommand.commands\": [', '],') // multi('f11',",
"File, Meta, Class, Project]: self.py // Class(i) self.py // Class(Primitive,",
"# self.editor = (Sec('editor', pfx='') // '\"editor.tabSize\": 4,' // '\"editor.rulers\":",
"not None: ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n' return",
"wc -l)' # self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir",
"= comment def sync(self): with open(self.path, 'w') as F: F.write(self.top.gen(self))",
"which python3)' \\ // f'PYT = $(shell which pytest)' \\",
"pytest)' \\ // f'PEP = $(shell which autopep8)' # self.mk.package",
"--in-place $?') self.mk.all \\ // (S('test: $(Y)', pfx='\\n.PHONY: test') //",
"[Object, S, Sec, IO, Dir, File, Meta, Class, Project]: self.py",
"'$(MAKE) test' ) self.mk.install \\ // (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install",
"// f'Y += $(MODULE).py test_$(MODULE).py' \\ // f'P += config.py'",
"if test else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''):",
"self.exclude = \\ (Sec() // (S('\"files.exclude\": {', '},') // self.files))",
"if isinstance(that, str): return S(that) raise TypeError(['box', type(that), that]) ##",
"// f'\"type\": \"shell\",' // f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\": []'",
"(S('meta: $(Y)', pfx='.PHONY: meta') // '$(MAKE) test' // '$(PY) $(MODULE).py'",
"update' // 'sudo apt install -u `cat apt.txt`') # self.mk.merge",
"self.head(prefix, test) # subtree return ret def head(self, prefix='', test=False):",
"= 2020 self.LICENSE = 'All rights reserved' self.COPYRIGHT = f'(c)",
"prefix='', test=False): # head def pad(depth): return '\\n' + '\\t'",
"'dev') // task('git', 'shadow') )) def src(self): self.py() self.test() self.config()",
"about self.AUTHOR = '<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov'",
"@name dump / string def test(self): return self.dump(test=True) def __repr__(self):",
"V class Dir(IO): def __floordiv__(self, that): assert isinstance(that, IO) that.path",
"self.mk // self.mk.tool self.mk.tool \\ // f'CURL = curl -L",
"+ self.head(prefix, test) # subtree return ret def head(self, prefix='',",
"gen(self, to, depth=0): ret = '' if self.pfx is not",
"depth=0, prefix='', test=False): # head def pad(depth): return '\\n' +",
"(S('{', '}') // self.multi // self.files // self.editor) def tasks(self):",
"ret += f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx is not None:",
"if self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.value",
"C; self.sup = sup def gen(self, to, depth=0): ret =",
"V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File): def __init__(self, V, ext='.py'): super().__init__(V,",
"self.py // Class(Primitive, [Object]) self.py \\ // S('Project().sync()', pfx='') def",
"self.mk.install \\ // (S('install: $(OS)_install doc') // '$(MAKE) test' )",
"src tmp' self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY: dev') // 'git",
"= $(shell which autopep8)' # self.mk.package = Sec('package', pfx=''); self.mk",
"python3)' \\ // f'PYT = $(shell which pytest)' \\ //",
"self.files)) self.watcher = \\ (Sec() // (S('\"files.watcherExclude\": {', '},') //",
"self.mk # self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var self.mk.var",
"* depth ret = pad(depth) + self.head(prefix, test) # subtree",
"(S('zip:', pfx='\\n.PHONY: zip') ) def sync(self): self.readme() self.d.sync() def readme(self):",
"## @name dump / string def test(self): return self.dump(test=True) def",
"+= .vscode bin doc lib src tmp' self.mk.merge \\ //",
"that): self.nest.append(self.box(that)); return self class Primitive(Object): pass class S(Primitive): def",
"'def test_any(): assert True' def dirs(self): self.d = Dir(f'{self}'); self.giti",
"File(IO): def __init__(self, V, ext='', tab=' ' * 4, comment='#'):",
"= S(f'class {self}:', pfx='') // 'pass' return ret.gen(to, depth) class",
"for i in self: i.sync() class File(IO): def __init__(self, V,",
"Linux_update:', pfx='.PHONY: Linux_install Linux_update') // 'sudo apt update' // 'sudo",
"self.apt = File('apt', '.txt'); self.d // self.apt self.apt \\ //",
"(S('{', '},') // f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [', ']') //",
"'pass' return ret.gen(to, depth) class Project(Meta): def __init__(self, V=None, title='',",
"self.giti.bot.pfx = '' # self.bin = Dir('bin'); self.d // self.bin",
"self.giti.top.sfx = '' self.giti // f'/{self}/' // '/__pycache__/' self.giti.bot.pfx =",
"// 'sudo apt install -u `cat apt.txt`') # self.mk.merge =",
"def gen(self, to, depth=0): ret = '' if self.pfx is",
"'MERGE = Makefile .gitignore README.md apt.txt $(S)' \\ // 'MERGE",
"-v' // 'git checkout $(SHADOW)' ) self.mk.merge \\ // (S('release:',",
"multi(key, cmd): return (S('{', '},') // f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\":",
"apt update' // 'sudo apt install -u `cat apt.txt`') #",
"return S(that) raise TypeError(['box', type(that), that]) ## @name dump /",
"def __format__(self, spec=''): if not spec: return self.val() raise TypeError(['__format__',",
"TypeError(['__format__', spec]) def tag(self): return self.__class__.__name__.lower() def val(self): return f'{self.value}'",
"pfx=None, sfx=None): super().__init__(V) self.end = end; self.pfx = pfx; self.sfx",
"// f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",' //",
"= 'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE = 'All rights reserved'",
"def apt(self): self.apt = File('apt', '.txt'); self.d // self.apt self.apt",
"class Primitive(Object): pass class S(Primitive): def __init__(self, V=None, end=None, pfx=None,",
") self.exclude = \\ (Sec() // (S('\"files.exclude\": {', '},') //",
"return ret class IO(Object): def __init__(self, V): super().__init__(V) self.path =",
"{self.LICENSE}' # self.dirs() self.mk() self.src() self.vscode() self.apt() def apt(self): self.apt",
"gid = '' if test else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}'",
"self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def __init__(self, V='.gitignore'): super().__init__(V) self.bot",
"pyFile(File): def __init__(self, V, ext='.py'): super().__init__(V, ext) class jsonFile(File): def",
"// f'S += $(Y)' # self.mk.cfg = Sec('cfg', pfx=''); self.mk",
"// f'\"problemMatcher\": []' ) self.tasks \\ // (S('{', '}') //",
"= {os.urandom(0x22)}\" \\ // f\"{'HOST':<11} = '127..0.0.1'\" \\ // f\"{'PORT':<11}",
"= Sec('tool', pfx=''); self.mk // self.mk.tool self.mk.tool \\ // f'CURL",
"= Dir(f'{self}'); self.giti = giti(); self.d // self.giti self.giti.top //",
"self.mk.install \\ // (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update') // 'sudo",
"= (Sec() // f'\"{self}/**\":true,' ) self.exclude = \\ (Sec() //",
"// 'import os, sys' for i in [Object, S, Sec,",
"+= f'{to.tab*depth}{self.end}\\n' if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n'",
"self.sfx = sfx def gen(self, to, depth=0): ret = ''",
"class Sec(S): def gen(self, to, depth=0): ret = '' if",
"// task('git', 'shadow') )) def src(self): self.py() self.test() self.config() def",
"// f'PY = $(shell which python3)' \\ // f'PYT =",
"// '' // f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT Project( title='ViZual",
"self.sup = sup def gen(self, to, depth=0): ret = S(f'class",
"in [Object, S, Sec, IO, Dir, File, Meta, Class, Project]:",
"\\ // (S('release:', pfx='\\n.PHONY: release') ) self.mk.merge \\ // (S('zip:',",
"(Sec('files', pfx='') // self.exclude // self.watcher // self.assoc) # self.editor",
"f'{\"LIB\":<11} = $(CWD)/lib' \\ // f'{\"SRC\":<11} = $(CWD)/src' \\ //",
"None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.value is not None: ret",
"that.path = f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self): try: os.mkdir(self.path) except",
"= V class Dir(IO): def __floordiv__(self, that): assert isinstance(that, IO)",
"checkout $(SHADOW) -- $(MERGE)' ) self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY:",
"end=None, pfx=None, sfx=None): super().__init__(V) self.end = end; self.pfx = pfx;",
"pfx=''); self.mk // self.mk.merge self.mk.merge \\ // 'SHADOW ?= ponymuck'",
"'],') // multi('f11', 'make meta') // multi('f12', 'make all') ))",
"// f'PEP = $(shell which autopep8)' # self.mk.package = Sec('package',",
"self.vscode() self.apt() def apt(self): self.apt = File('apt', '.txt'); self.d //",
"// '$(MAKE) test' // '$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place",
"return self.dump(test=False) def dump(self, cycle=[], depth=0, prefix='', test=False): # head",
"self.value is not None: ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for",
"Sec('rule', pfx=''); self.mk // self.mk.rule # self.mk.doc = Sec('doc', pfx='');",
"= '' if test else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def",
"self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package self.mk.package \\ //",
"Sec('src', pfx=''); self.mk // self.mk.src self.mk.src \\ // f'Y +=",
"\"shell\",' // f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\": []' ) self.tasks",
"$(shell grep processor /proc/cpuinfo | wc -l)' # self.mk.dir =",
"pfx=''); self.mk // self.mk.install self.mk.install // '.PHONY: install update' self.mk.install",
"// '\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,' )",
"curl -L -o' \\ // f'PY = $(shell which python3)'",
"self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule # self.mk.doc =",
"'' // f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT Project( title='ViZual language",
"+= f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i in self: ret +=",
"$(SHADOW) -- $(MERGE)' ) self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY: shadow')",
"is not None: ret += f'{to.tab*depth}{self.end}\\n' if self.sfx is not",
"Dir, File, Meta, Class, Project]: self.py // Class(i) self.py //",
"self.mk // self.mk.package self.mk.package \\ // f'SYSLINUX_VER = 6.0.3' #",
"self.py \\ // 'import os, sys' for i in [Object,",
"f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT Project( title='ViZual language environment', about='''",
"return (S('{', '},') // f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\": \"shell\",'",
"Makefile(); self.d // self.mk # self.mk.var = Sec('var', pfx=''); self.mk",
"?= ponymuck' self.mk.merge \\ // 'MERGE = Makefile .gitignore README.md",
"class Dir(IO): def __floordiv__(self, that): assert isinstance(that, IO) that.path =",
"// 'git checkout $(SHADOW)' ) self.mk.merge \\ // (S('release:', pfx='\\n.PHONY:",
"in self: ret += i.gen(to, depth + 0) if self.nest",
"pfx; self.sfx = sfx def gen(self, to, depth=0): ret =",
"\\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ // f\"{'HOST':<11} = '127..0.0.1'\"",
"= Sec('doc', pfx=''); self.mk // self.mk.doc self.mk.doc \\ // S('doc:",
"Class(Primitive, [Object]) self.py \\ // S('Project().sync()', pfx='') def test(self): self.test",
"(S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi",
"// '\"version\": \"2.0.0\",' // (S('\"tasks\": [', ']') // task('project', 'install')",
"def __init__(self, C, sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz = C;",
"meta') // '$(MAKE) test' // '$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS)",
"head def pad(depth): return '\\n' + '\\t' * depth ret",
"= \\ (Sec('multi') // (S('\"multiCommand.commands\": [', '],') // multi('f11', 'make",
"self.test \\ // 'import pytest' \\ // f'from {self} import",
"self.d // self.giti self.giti.top // '*~' // '*.swp' // '*.log';",
"def box(self, that): if isinstance(that, Object): return that if isinstance(that,",
"class File(IO): def __init__(self, V, ext='', tab=' ' * 4,",
"operator def __iter__(self): return iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that)); return",
"__init__(self, V=None, title='', about=''): if not V: V = os.getcwd().split('/')[-1]",
"'<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE = 'All",
"self.files = (Sec('files', pfx='') // self.exclude // self.watcher // self.assoc)",
"test') // '$(PYT) test_$(MODULE).py') # self.mk.rule = Sec('rule', pfx=''); self.mk",
"= 6.0.3' # self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src",
"[', ']') // task('project', 'install') // task('project', 'update') // task('git',",
"mk(self): self.mk = Makefile(); self.d // self.mk # self.mk.var =",
"test' // '$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all",
"self.mk.merge \\ // (S('release:', pfx='\\n.PHONY: release') ) self.mk.merge \\ //",
"// self.settings # def multi(key, cmd): return (S('{', '},') //",
"f'\"type\": \"shell\",' // f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\": []' )",
"V, ext='.json', comment='//'): super().__init__(V, ext, comment=comment) class Meta(Object): pass class",
"// 'code meld' \\ // 'python3 python3-venv' \\ // 'build-essential",
"= [] def box(self, that): if isinstance(that, Object): return that",
"pass class S(Primitive): def __init__(self, V=None, end=None, pfx=None, sfx=None): super().__init__(V)",
"\\ // f'from {self} import *' \\ // 'def test_any():",
"// '*~' // '*.swp' // '*.log'; self.giti.top.sfx = '' self.giti",
"self.readme self.readme \\ // f'#  `{self}`' // f'## {self.TITLE}'",
"// '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D {cmd}",
"self.d // self.config self.config \\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\",
"$(CWD)/lib' \\ // f'{\"SRC\":<11} = $(CWD)/src' \\ // f'{\"TMP\":<11} =",
"not None: ret += f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx is",
"self.d.sync() def readme(self): self.readme = File('README', '.md'); self.d // self.readme",
"None: ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i in self:",
"self.mk.tool self.mk.tool \\ // f'CURL = curl -L -o' \\",
"\\ // (S('install: $(OS)_install doc') // '$(MAKE) test' ) self.mk.install",
"return iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that)); return self class Primitive(Object):",
"py(self): self.py = pyFile(f'{self}'); self.d // self.py self.py \\ //",
"= C; self.sup = sup def gen(self, to, depth=0): ret",
"task('project', 'install') // task('project', 'update') // task('git', 'dev') // task('git',",
"self.d // self.apt self.apt \\ // 'git make curl' //",
"= pad(depth) + self.head(prefix, test) # subtree return ret def",
"self.mk.var \\ // f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\ // f'{\"OS\":<11}",
"// self.mk.doc self.mk.doc \\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc",
"pfx='\\n.PHONY: dev') // 'git push -v' // 'git checkout $@'",
"(S('\"files.associations\": {', '},'))) self.files = (Sec('files', pfx='') // self.exclude //",
"pfx=''); self.mk // self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11} = $(CURDIR)'",
"to, depth=0): ret = '' if self.pfx is not None:",
"self.d // self.test self.test \\ // 'import pytest' \\ //",
"sync(self): self.readme() self.d.sync() def readme(self): self.readme = File('README', '.md'); self.d",
"grep processor /proc/cpuinfo | wc -l)' # self.mk.dir = Sec('dir',",
"\\ // f'PEP = $(shell which autopep8)' # self.mk.package =",
"= pfx; self.sfx = sfx def gen(self, to, depth=0): ret",
"= \\ (Sec() // (S('\"files.associations\": {', '},'))) self.files = (Sec('files',",
"box(self, that): if isinstance(that, Object): return that if isinstance(that, str):",
"assert isinstance(that, IO) that.path = f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self):",
"// '$(PYT) test_$(MODULE).py') # self.mk.rule = Sec('rule', pfx=''); self.mk //",
"doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:') // '$(CURL) $@",
"self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.value is",
"$(CWD)/bin' \\ // f'{\"DOC\":<11} = $(CWD)/doc' \\ // f'{\"LIB\":<11} =",
"def multi(key, cmd): return (S('{', '},') // f'\"command\": \"multiCommand.{key}\",' //",
"// task('project', 'install') // task('project', 'update') // task('git', 'dev') //",
"f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n' if self.nest and self.value is",
"for i in [Object, S, Sec, IO, Dir, File, Meta,",
"'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE = 'All rights reserved' self.COPYRIGHT",
"# self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src self.mk.src \\",
"# self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install self.mk.install //",
"def gen(self, to, depth=0): ret = S(f'class {self}:', pfx='') //",
"'' # self.bin = Dir('bin'); self.d // self.bin def mk(self):",
"$(CWD)/src' \\ // f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool = Sec('tool',",
"'}') // '\"version\": \"2.0.0\",' // (S('\"tasks\": [', ']') // task('project',",
"'},') // f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",'",
"= $(shell which pytest)' \\ // f'PEP = $(shell which",
"\\ (Sec() // (S('\"files.exclude\": {', '},') // self.files)) self.watcher =",
"{', '},') // self.files)) self.watcher = \\ (Sec() // (S('\"files.watcherExclude\":",
"self.mk.rule # self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc self.mk.doc",
"\\ // f'{\"CORES\":<11} = $(shell grep processor /proc/cpuinfo | wc",
"not None: ret += f'{to.tab*depth}{self.end}\\n' if self.sfx is not None:",
"def head(self, prefix='', test=False): gid = '' if test else",
"self.LICENSE = 'All rights reserved' self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}>",
"'\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,' ) #",
"\\ // (S('{', '}') // '\"version\": \"2.0.0\",' // (S('\"tasks\": [',",
"= $(notdir $(CURDIR))' \\ // f'{\"OS\":<11} = $(shell uname -s)'",
"[80],' // '\"workbench.tree.indent\": 32,' ) # self.settings \\ // (S('{',",
"uname -s)' \\ // f'{\"CORES\":<11} = $(shell grep processor /proc/cpuinfo",
"(Sec() // (S('\"files.watcherExclude\": {', '},') // self.files)) self.assoc = \\",
"\\ // S('Project().sync()', pfx='') def test(self): self.test = pyFile(f'test_{self}'); self.d",
"return super().__floordiv__(that) def sync(self): try: os.mkdir(self.path) except FileExistsError: pass for",
"config(self): self.config = pyFile('config'); self.d // self.config self.config \\ //",
"Sec(); self.bot = Sec() self.tab = tab; self.comment = comment",
"pfx='.PHONY: meta') // '$(MAKE) test' // '$(PY) $(MODULE).py' // '$(PEP)",
"= Sec('rule', pfx=''); self.mk // self.mk.rule # self.mk.doc = Sec('doc',",
"f'\"problemMatcher\": []' ) self.tasks \\ // (S('{', '}') // '\"version\":",
"f'{self.value}' ## @name operator def __iter__(self): return iter(self.nest) def __floordiv__(self,",
"TypeError(['box', type(that), that]) ## @name dump / string def test(self):",
"update' self.mk.install \\ // (S('install: $(OS)_install doc') // '$(MAKE) test'",
"+= f'{to.tab*depth}{self.pfx}\\n' if self.value is not None: ret += f'{to.tab*depth}{self.value}\\n'",
"test=False): # head def pad(depth): return '\\n' + '\\t' *",
"__init__(self, V, ext='.json', comment='//'): super().__init__(V, ext, comment=comment) class Meta(Object): pass",
"\\ // f'{\"OS\":<11} = $(shell uname -s)' \\ // f'{\"CORES\":<11}",
"pfx=''); self.mk // self.mk.rule # self.mk.doc = Sec('doc', pfx=''); self.mk",
"S, Sec, IO, Dir, File, Meta, Class, Project]: self.py //",
"for i in self: ret += i.gen(to, depth + 1)",
"'git make curl' // 'code meld' \\ // 'python3 python3-venv'",
"// '*.log'; self.giti.top.sfx = '' self.giti // f'/{self}/' // '/__pycache__/'",
"return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if not spec: return self.val()",
"f'\"{self}/**\":true,' ) self.exclude = \\ (Sec() // (S('\"files.exclude\": {', '},')",
"// self.mk.install self.mk.install // '.PHONY: install update' self.mk.install \\ //",
"def dump(self, cycle=[], depth=0, prefix='', test=False): # head def pad(depth):",
"self.exclude // self.watcher // self.assoc) # self.editor = (Sec('editor', pfx='')",
"= (Sec('editor', pfx='') // '\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],' //",
"self.mk // self.mk.rule # self.mk.doc = Sec('doc', pfx=''); self.mk //",
"= curl -L -o' \\ // f'PY = $(shell which",
"Linux_update') // 'sudo apt update' // 'sudo apt install -u",
"// 'git push -v' // 'git checkout $(SHADOW)' ) self.mk.merge",
"ret def head(self, prefix='', test=False): gid = '' if test",
"def settings(self): self.settings = jsonFile('settings'); self.vscode // self.settings # def",
"pfx='\\n.PHONY: release') ) self.mk.merge \\ // (S('zip:', pfx='\\n.PHONY: zip') )",
"sync(self): try: os.mkdir(self.path) except FileExistsError: pass for i in self:",
"self.files)) self.assoc = \\ (Sec() // (S('\"files.associations\": {', '},'))) self.files",
"f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\ // f'{\"OS\":<11} = $(shell uname",
"return ret.gen(to, depth) class Project(Meta): def __init__(self, V=None, title='', about=''):",
"// f'{\"DOC\":<11} = $(CWD)/doc' \\ // f'{\"LIB\":<11} = $(CWD)/lib' \\",
"[Object]) self.py \\ // S('Project().sync()', pfx='') def test(self): self.test =",
"ret += f'{to.tab*depth}{self.end}\\n' if self.sfx is not None: ret +=",
"class jsonFile(File): def __init__(self, V, ext='.json', comment='//'): super().__init__(V, ext, comment=comment)",
"(S('\"files.watcherExclude\": {', '},') // self.files)) self.assoc = \\ (Sec() //",
"os, sys' for i in [Object, S, Sec, IO, Dir,",
"spec]) def tag(self): return self.__class__.__name__.lower() def val(self): return f'{self.value}' ##",
"pfx=''); self.mk // self.mk.tool self.mk.tool \\ // f'CURL = curl",
"self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx else",
"'python3 python3-venv' \\ // 'build-essential g++' def vscode(self): self.vscode =",
"'*~' // '*.swp' // '*.log'; self.giti.top.sfx = '' self.giti //",
"ext='', tab=' ' * 4, comment='#'): super().__init__(V + ext) self.top",
"for i in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def __init__(self,",
"// '$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all \\ // (S('test: $(Y)',",
"// (S('\"files.exclude\": {', '},') // self.files)) self.watcher = \\ (Sec()",
"that): assert isinstance(that, IO) that.path = f'{self.path}/{that.path}' return super().__floordiv__(that) def",
"= f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs() self.mk() self.src()",
"class Project(Meta): def __init__(self, V=None, title='', about=''): if not V:",
"\\ // 'python3 python3-venv' \\ // 'build-essential g++' def vscode(self):",
"self.vscode = Dir('.vscode'); self.d // self.vscode self.settings() self.tasks() def settings(self):",
"// self.mk.all self.mk.all \\ // (S('meta: $(Y)', pfx='.PHONY: meta') //",
"Dir('bin'); self.d // self.bin def mk(self): self.mk = Makefile(); self.d",
"self.mk.doc \\ // (S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install",
"test_any(): assert True' def dirs(self): self.d = Dir(f'{self}'); self.giti =",
"def __init__(self, V): super().__init__(V) self.path = V class Dir(IO): def",
"= '' # self.bin = Dir('bin'); self.d // self.bin def",
"'$(MAKE) test' ) self.mk.install \\ // (S('update: $(OS)_update doc') //",
"self.dirs() self.mk() self.src() self.vscode() self.apt() def apt(self): self.apt = File('apt',",
"\\ // (S('update: $(OS)_update doc') // '$(MAKE) test' ) self.mk.install",
"if isinstance(that, Object): return that if isinstance(that, str): return S(that)",
"S(Primitive): def __init__(self, V=None, end=None, pfx=None, sfx=None): super().__init__(V) self.end =",
"in self: i.sync() class File(IO): def __init__(self, V, ext='', tab='",
"self.TITLE = title if title else f'{self}' self.ABOUT = about",
"'\"workbench.tree.indent\": 32,' ) # self.settings \\ // (S('{', '}') //",
"// '\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,' ) # self.settings \\",
"def task(clazz, cmd): return (S('{', '},') // f'\"label\": \"{clazz}: {cmd}\",'",
"pytest' \\ // f'from {self} import *' \\ // 'def",
"to, depth=0): ret = S(f'class {self}:', pfx='') // 'pass' return",
"4,' // '\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,' ) # self.settings",
"val(self): return f'{self.value}' ## @name operator def __iter__(self): return iter(self.nest)",
"except FileExistsError: pass for i in self: i.sync() class File(IO):",
"self.settings() self.tasks() def settings(self): self.settings = jsonFile('settings'); self.vscode // self.settings",
"self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11} = $(CURDIR)' \\ // f'{\"BIN\":<11}",
"Dir('.vscode'); self.d // self.vscode self.settings() self.tasks() def settings(self): self.settings =",
"// self.mk.rule # self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc",
"V=None, title='', about=''): if not V: V = os.getcwd().split('/')[-1] super().__init__(V)",
"+= config.py' \\ // f'S += $(Y)' # self.mk.cfg =",
"= 12345\" def py(self): self.py = pyFile(f'{self}'); self.d // self.py",
"$(shell which python3)' \\ // f'PYT = $(shell which pytest)'",
"return ret def head(self, prefix='', test=False): gid = '' if",
"test(self): self.test = pyFile(f'test_{self}'); self.d // self.test self.test \\ //",
"try: os.mkdir(self.path) except FileExistsError: pass for i in self: i.sync()",
"pfx='.PHONY: doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:') // '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf')",
"self.settings = jsonFile('settings'); self.vscode // self.settings # def multi(key, cmd):",
"tag(self): return self.__class__.__name__.lower() def val(self): return f'{self.value}' ## @name operator",
"__init__(self, V, ext='.py'): super().__init__(V, ext) class jsonFile(File): def __init__(self, V,",
"0) if self.nest and self.value is not None: ret +=",
"self: ret += i.gen(to, depth + 1) if self.end is",
") self.mk.merge \\ // (S('release:', pfx='\\n.PHONY: release') ) self.mk.merge \\",
"// f'{\"CWD\":<11} = $(CURDIR)' \\ // f'{\"BIN\":<11} = $(CWD)/bin' \\",
"depth=0): ret = '' if self.pfx is not None: ret",
"// f'P += config.py' \\ // f'S += $(Y)' #",
"// self.giti self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx",
"= Sec('install', pfx=''); self.mk // self.mk.install self.mk.install // '.PHONY: install",
"if self.pfx else '\\n' return ret class IO(Object): def __init__(self,",
"self.mk = Makefile(); self.d // self.mk # self.mk.var = Sec('var',",
"(Sec('editor', pfx='') // '\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],' // '\"workbench.tree.indent\":",
"self.mk.src \\ // f'Y += $(MODULE).py test_$(MODULE).py' \\ // f'P",
"2020 self.LICENSE = 'All rights reserved' self.COPYRIGHT = f'(c) {self.AUTHOR}",
"// f'from {self} import *' \\ // 'def test_any(): assert",
"'*.swp' // '*.log'; self.giti.top.sfx = '' self.giti // f'/{self}/' //",
"\\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all = Sec('all', pfx='');",
"self.py() self.test() self.config() def config(self): self.config = pyFile('config'); self.d //",
"self.mk.package \\ // f'SYSLINUX_VER = 6.0.3' # self.mk.src = Sec('src',",
"return self class Primitive(Object): pass class S(Primitive): def __init__(self, V=None,",
"sfx def gen(self, to, depth=0): ret = '' if self.pfx",
"super().__floordiv__(that) def sync(self): try: os.mkdir(self.path) except FileExistsError: pass for i",
"self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg self.mk.cfg \\ //",
"Sec('all', pfx=''); self.mk // self.mk.all self.mk.all \\ // (S('meta: $(Y)',",
"pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py') # self.mk.rule = Sec('rule', pfx='');",
"// (S('zip:', pfx='\\n.PHONY: zip') ) def sync(self): self.readme() self.d.sync() def",
"super().__init__(V, ext) class jsonFile(File): def __init__(self, V, ext='.json', comment='//'): super().__init__(V,",
"/ string def test(self): return self.dump(test=True) def __repr__(self): return self.dump(test=False)",
"dev') // 'git push -v' // 'git checkout $@' //",
"$(OS)_update doc') // '$(MAKE) test' ) self.mk.install \\ // (S('Linux_install",
"self.mk // self.mk.all self.mk.all \\ // (S('meta: $(Y)', pfx='.PHONY: meta')",
"os.mkdir(self.path) except FileExistsError: pass for i in self: i.sync() class",
") self.tasks \\ // (S('{', '}') // '\"version\": \"2.0.0\",' //",
"ret = pad(depth) + self.head(prefix, test) # subtree return ret",
"def tasks(self): self.tasks = jsonFile('tasks'); self.vscode // self.tasks def task(clazz,",
"f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i in self: ret += i.gen(to,",
"self.value is not None: ret += f'{to.tab*depth}{self.value}\\n' for i in",
"+= $(Y)' # self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg",
"\"2.0.0\",' // (S('\"tasks\": [', ']') // task('project', 'install') // task('project',",
"class pyFile(File): def __init__(self, V, ext='.py'): super().__init__(V, ext) class jsonFile(File):",
"subtree return ret def head(self, prefix='', test=False): gid = ''",
"\\ // f'{\"LIB\":<11} = $(CWD)/lib' \\ // f'{\"SRC\":<11} = $(CWD)/src'",
"'MERGE += .vscode bin doc lib src tmp' self.mk.merge \\",
"f'{to.tab*depth}{self.end}\\n' if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' return",
"\\ // f'PY = $(shell which python3)' \\ // f'PYT",
"self.d // self.readme self.readme \\ // f'#  `{self}`' //",
"// (S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') //",
"self.assoc) # self.editor = (Sec('editor', pfx='') // '\"editor.tabSize\": 4,' //",
"readme(self): self.readme = File('README', '.md'); self.d // self.readme self.readme \\",
"// f'{\"BIN\":<11} = $(CWD)/bin' \\ // f'{\"DOC\":<11} = $(CWD)/doc' \\",
"pfx='') // self.exclude // self.watcher // self.assoc) # self.editor =",
"// Class(Primitive, [Object]) self.py \\ // S('Project().sync()', pfx='') def test(self):",
"Project(Meta): def __init__(self, V=None, title='', about=''): if not V: V",
"f'CURL = curl -L -o' \\ // f'PY = $(shell",
"assert callable(C) super().__init__(C.__name__) self.clazz = C; self.sup = sup def",
"def test(self): self.test = pyFile(f'test_{self}'); self.d // self.test self.test \\",
"self.mk.install // '.PHONY: install update' self.mk.install \\ // (S('install: $(OS)_install",
"self.apt() def apt(self): self.apt = File('apt', '.txt'); self.d // self.apt",
"meld' \\ // 'python3 python3-venv' \\ // 'build-essential g++' def",
"// task('project', 'update') // task('git', 'dev') // task('git', 'shadow') ))",
"// self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11} = $(CURDIR)' \\ //",
"'\\t' * depth ret = pad(depth) + self.head(prefix, test) #",
"multi('f11', 'make meta') // multi('f12', 'make all') )) # self.files",
"str): return S(that) raise TypeError(['box', type(that), that]) ## @name dump",
"// f'\"label\": \"{clazz}: {cmd}\",' // f'\"type\": \"shell\",' // f'\"command\": \"make",
"# self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc self.mk.doc \\",
"// Class(i) self.py // Class(Primitive, [Object]) self.py \\ // S('Project().sync()',",
"$(CURDIR))' \\ // f'{\"OS\":<11} = $(shell uname -s)' \\ //",
"// self.test self.test \\ // 'import pytest' \\ // f'from",
"\\ // (S('shadow:', pfx='\\n.PHONY: shadow') // 'git push -v' //",
"i.gen(to, depth + 1) if self.end is not None: ret",
"sfx=None): super().__init__(V) self.end = end; self.pfx = pfx; self.sfx =",
"# self.dirs() self.mk() self.src() self.vscode() self.apt() def apt(self): self.apt =",
"'$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install', pfx=''); self.mk //",
"// '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install', pfx=''); self.mk",
"// 'git make curl' // 'code meld' \\ // 'python3",
"{self}:', pfx='') // 'pass' return ret.gen(to, depth) class Project(Meta): def",
"self.watcher = \\ (Sec() // (S('\"files.watcherExclude\": {', '},') // self.files))",
"f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if not spec: return self.val() raise",
"# self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var self.mk.var \\",
"[', ']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\":",
"\\ // (S('meta: $(Y)', pfx='.PHONY: meta') // '$(MAKE) test' //",
"= pyFile('config'); self.d // self.config self.config \\ // f\"{'SECURE_KEY':<11} =",
"class Meta(Object): pass class Class(Meta): def __init__(self, C, sup=[]): assert",
"self.value is not None: ret += f'{to.tab*depth}{to.comment} / {self}\\n' if",
"@name constructor def __init__(self, V): self.value = V self.nest =",
"self.tasks \\ // (S('{', '}') // '\"version\": \"2.0.0\",' // (S('\"tasks\":",
"class giti(File): def __init__(self, V='.gitignore'): super().__init__(V) self.bot // f'!{self}' class",
"self.mk.dir \\ // f'{\"CWD\":<11} = $(CURDIR)' \\ // f'{\"BIN\":<11} =",
"self.settings \\ // (S('{', '}') // self.multi // self.files //",
"return self.val() raise TypeError(['__format__', spec]) def tag(self): return self.__class__.__name__.lower() def",
"if self.nest and self.value is not None: ret += f'{to.tab*depth}{to.comment}",
"Project]: self.py // Class(i) self.py // Class(Primitive, [Object]) self.py \\",
"'\\n' if self.nest and self.value is not None: ret +=",
"// multi('f12', 'make all') )) # self.files = (Sec() //",
"= \\ (Sec() // (S('\"files.exclude\": {', '},') // self.files)) self.watcher",
"assert True' def dirs(self): self.d = Dir(f'{self}'); self.giti = giti();",
"Class, Project]: self.py // Class(i) self.py // Class(Primitive, [Object]) self.py",
"$(CURDIR)' \\ // f'{\"BIN\":<11} = $(CWD)/bin' \\ // f'{\"DOC\":<11} =",
"self.mk.cfg self.mk.cfg \\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702' # self.mk.all =",
"\\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\ // (S('doc/pyMorphic.pdf:')",
"def src(self): self.py() self.test() self.config() def config(self): self.config = pyFile('config');",
"= '' self.giti // f'/{self}/' // '/__pycache__/' self.giti.bot.pfx = ''",
"(S('shadow:', pfx='\\n.PHONY: shadow') // 'git push -v' // 'git checkout",
"// '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}' self.readme",
") self.mk.install \\ // (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update') //",
"end; self.pfx = pfx; self.sfx = sfx def gen(self, to,",
") def sync(self): self.readme() self.d.sync() def readme(self): self.readme = File('README',",
"apt.txt $(S)' \\ // 'MERGE += .vscode bin doc lib",
"// f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT Project( title='ViZual language environment',",
"f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S): def gen(self, to, depth=0): ret",
"+= i.gen(to, depth + 1) if self.end is not None:",
"F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def __init__(self, V='.gitignore'): super().__init__(V) self.bot //",
"= pyFile(f'{self}'); self.d // self.py self.py \\ // 'import os,",
"V): self.value = V self.nest = [] def box(self, that):",
"f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx is not None: ret +=",
"raise TypeError(['__format__', spec]) def tag(self): return self.__class__.__name__.lower() def val(self): return",
"// f\"{'PORT':<11} = 12345\" def py(self): self.py = pyFile(f'{self}'); self.d",
"Class(i) self.py // Class(Primitive, [Object]) self.py \\ // S('Project().sync()', pfx='')",
"self.d // self.bin def mk(self): self.mk = Makefile(); self.d //",
"'\"version\": \"2.0.0\",' // (S('\"tasks\": [', ']') // task('project', 'install') //",
"V): super().__init__(V) self.path = V class Dir(IO): def __floordiv__(self, that):",
"'install') // task('project', 'update') // task('git', 'dev') // task('git', 'shadow')",
"self.giti = giti(); self.d // self.giti self.giti.top // '*~' //",
"python3-venv' \\ // 'build-essential g++' def vscode(self): self.vscode = Dir('.vscode');",
"i in self: ret += i.gen(to, depth + 1) if",
"File('README', '.md'); self.d // self.readme self.readme \\ // f'# ",
"else '\\n' if self.nest and self.value is not None: ret",
"= $(CWD)/tmp' # self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool",
"'},') // self.files)) self.assoc = \\ (Sec() // (S('\"files.associations\": {',",
"test_$(MODULE).py') # self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule #",
"return ret class Sec(S): def gen(self, to, depth=0): ret =",
"{', '},'))) self.files = (Sec('files', pfx='') // self.exclude // self.watcher",
"// f'{\"LIB\":<11} = $(CWD)/lib' \\ // f'{\"SRC\":<11} = $(CWD)/src' \\",
"(S('update: $(OS)_update doc') // '$(MAKE) test' ) self.mk.install \\ //",
"def test(self): return self.dump(test=True) def __repr__(self): return self.dump(test=False) def dump(self,",
"self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs() self.mk()",
"'' if test else f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self,",
"None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n' if self.nest",
"\\ // 'build-essential g++' def vscode(self): self.vscode = Dir('.vscode'); self.d",
"pfx='') // '\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],' // '\"workbench.tree.indent\": 32,'",
"// (S('meta: $(Y)', pfx='.PHONY: meta') // '$(MAKE) test' // '$(PY)",
"self.editor) def tasks(self): self.tasks = jsonFile('tasks'); self.vscode // self.tasks def",
"'$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place $?') self.mk.all \\ //",
"\\ // f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool = Sec('tool', pfx='');",
"return (S('{', '},') // f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [', ']')",
"self.mk // self.mk.src self.mk.src \\ // f'Y += $(MODULE).py test_$(MODULE).py'",
"self.mk.all \\ // (S('meta: $(Y)', pfx='.PHONY: meta') // '$(MAKE) test'",
"# head def pad(depth): return '\\n' + '\\t' * depth",
"V=None, end=None, pfx=None, sfx=None): super().__init__(V) self.end = end; self.pfx =",
"depth + 0) if self.nest and self.value is not None:",
"F.write(self.top.gen(self)) for i in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def",
"# self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all self.mk.all \\",
"def __init__(self, V=None, end=None, pfx=None, sfx=None): super().__init__(V) self.end = end;",
"def tag(self): return self.__class__.__name__.lower() def val(self): return f'{self.value}' ## @name",
"= pyFile(f'test_{self}'); self.d // self.test self.test \\ // 'import pytest'",
"pfx=''); self.mk // self.mk.all self.mk.all \\ // (S('meta: $(Y)', pfx='.PHONY:",
"self.nest.append(self.box(that)); return self class Primitive(Object): pass class S(Primitive): def __init__(self,",
"self.d // self.vscode self.settings() self.tasks() def settings(self): self.settings = jsonFile('settings');",
"$(CWD)/tmp' # self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool self.mk.tool",
"// task('git', 'dev') // task('git', 'shadow') )) def src(self): self.py()",
"// self.mk.src self.mk.src \\ // f'Y += $(MODULE).py test_$(MODULE).py' \\",
"S(f'class {self}:', pfx='') // 'pass' return ret.gen(to, depth) class Project(Meta):",
"self.tab = tab; self.comment = comment def sync(self): with open(self.path,",
"F: F.write(self.top.gen(self)) for i in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File):",
"Dir(f'{self}'); self.giti = giti(); self.d // self.giti self.giti.top // '*~'",
"'' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}' self.readme //",
"pyFile(f'{self}'); self.d // self.py self.py \\ // 'import os, sys'",
"\\ // f'{\"BIN\":<11} = $(CWD)/bin' \\ // f'{\"DOC\":<11} = $(CWD)/doc'",
"tab=' ' * 4, comment='#'): super().__init__(V + ext) self.top =",
"title if title else f'{self}' self.ABOUT = about self.AUTHOR =",
"is not None: ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx else '\\n'",
"'git checkout $(SHADOW)' ) self.mk.merge \\ // (S('release:', pfx='\\n.PHONY: release')",
"self.readme // self.ABOUT Project( title='ViZual language environment', about=''' * object",
"self.mk.var self.mk.var \\ // f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\ //",
"self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir self.mk.dir \\ //",
") # self.settings \\ // (S('{', '}') // self.multi //",
"ponymuck' self.mk.merge \\ // 'MERGE = Makefile .gitignore README.md apt.txt",
"@name operator def __iter__(self): return iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that));",
"{cmd} \\\\u000D\"}}}}' ))) self.multi = \\ (Sec('multi') // (S('\"multiCommand.commands\": [',",
"{os.urandom(0x22)}\" \\ // f\"{'HOST':<11} = '127..0.0.1'\" \\ // f\"{'PORT':<11} =",
"self.files = (Sec() // f'\"{self}/**\":true,' ) self.exclude = \\ (Sec()",
"self: ret += i.gen(to, depth + 0) if self.nest and",
"__init__(self, V): self.value = V self.nest = [] def box(self,",
"= Sec() self.tab = tab; self.comment = comment def sync(self):",
"// (S('update: $(OS)_update doc') // '$(MAKE) test' ) self.mk.install \\",
"True' def dirs(self): self.d = Dir(f'{self}'); self.giti = giti(); self.d",
"f' @{id(self):x}' return f'{prefix}<{self.tag()}:{self.val()}>{gid}' def __format__(self, spec=''): if not spec:",
"# self.files = (Sec() // f'\"{self}/**\":true,' ) self.exclude = \\",
"(S('test: $(Y)', pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py') # self.mk.rule =",
"self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx = ''",
"config.py' \\ // f'S += $(Y)' # self.mk.cfg = Sec('cfg',",
"+= f'{to.tab*depth}{to.comment} / {self}\\n' if self.sfx is not None: ret",
"self.readme() self.d.sync() def readme(self): self.readme = File('README', '.md'); self.d //",
"(S('\"multiCommand.commands\": [', '],') // multi('f11', 'make meta') // multi('f12', 'make",
"-- $(MERGE)' ) self.mk.merge \\ // (S('shadow:', pfx='\\n.PHONY: shadow') //",
"return self.dump(test=True) def __repr__(self): return self.dump(test=False) def dump(self, cycle=[], depth=0,",
"// '/__pycache__/' self.giti.bot.pfx = '' # self.bin = Dir('bin'); self.d",
"[]' ) self.tasks \\ // (S('{', '}') // '\"version\": \"2.0.0\",'",
"that]) ## @name dump / string def test(self): return self.dump(test=True)",
"= '127..0.0.1'\" \\ // f\"{'PORT':<11} = 12345\" def py(self): self.py",
"push -v' // 'git checkout $@' // 'git checkout $(SHADOW)",
"super().__init__(V) self.end = end; self.pfx = pfx; self.sfx = sfx",
"Makefile .gitignore README.md apt.txt $(S)' \\ // 'MERGE += .vscode",
"// 'git push -v' // 'git checkout $@' // 'git",
"self.dump(test=True) def __repr__(self): return self.dump(test=False) def dump(self, cycle=[], depth=0, prefix='',",
"is not None: ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i",
"def sync(self): try: os.mkdir(self.path) except FileExistsError: pass for i in",
"pyFile(f'test_{self}'); self.d // self.test self.test \\ // 'import pytest' \\",
"{self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' # self.dirs() self.mk() self.src() self.vscode() self.apt()",
"with open(self.path, 'w') as F: F.write(self.top.gen(self)) for i in self:",
"__floordiv__(self, that): self.nest.append(self.box(that)); return self class Primitive(Object): pass class S(Primitive):",
"def val(self): return f'{self.value}' ## @name operator def __iter__(self): return",
"f\"{'HOST':<11} = '127..0.0.1'\" \\ // f\"{'PORT':<11} = 12345\" def py(self):",
"self.vscode // self.tasks def task(clazz, cmd): return (S('{', '},') //",
"Sec('package', pfx=''); self.mk // self.mk.package self.mk.package \\ // f'SYSLINUX_VER =",
"not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.value is not None:",
"// f'\"command\": \"make {cmd}\",' // f'\"problemMatcher\": []' ) self.tasks \\",
"jsonFile(File): def __init__(self, V, ext='.json', comment='//'): super().__init__(V, ext, comment=comment) class",
"C, sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz = C; self.sup =",
"ret += i.gen(to, depth + 1) if self.end is not",
"return f'{self.value}' ## @name operator def __iter__(self): return iter(self.nest) def",
"(S('{', '}') // '\"version\": \"2.0.0\",' // (S('\"tasks\": [', ']') //",
"self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY: dev') // 'git push -v'",
"= jsonFile('tasks'); self.vscode // self.tasks def task(clazz, cmd): return (S('{',",
"self.assoc = \\ (Sec() // (S('\"files.associations\": {', '},'))) self.files =",
"= tab; self.comment = comment def sync(self): with open(self.path, 'w')",
"processor /proc/cpuinfo | wc -l)' # self.mk.dir = Sec('dir', pfx='');",
"self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti self.giti.top",
"cmd): return (S('{', '},') // f'\"command\": \"multiCommand.{key}\",' // (S('\"sequence\": [',",
"None: ret += f'{to.tab*depth}{self.end}\\n' if self.sfx is not None: ret",
"= Sec('all', pfx=''); self.mk // self.mk.all self.mk.all \\ // (S('meta:",
"$@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf') # self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install",
"\\ (Sec('multi') // (S('\"multiCommand.commands\": [', '],') // multi('f11', 'make meta')",
"class S(Primitive): def __init__(self, V=None, end=None, pfx=None, sfx=None): super().__init__(V) self.end",
"self.top = Sec(); self.bot = Sec() self.tab = tab; self.comment",
"$@' // 'git checkout $(SHADOW) -- $(MERGE)' ) self.mk.merge \\",
"'git checkout $@' // 'git checkout $(SHADOW) -- $(MERGE)' )",
"class IO(Object): def __init__(self, V): super().__init__(V) self.path = V class",
"'$(PYT) test_$(MODULE).py') # self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule",
"// 'build-essential g++' def vscode(self): self.vscode = Dir('.vscode'); self.d //",
"not None: ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n' for i in",
"'/__pycache__/' self.giti.bot.pfx = '' # self.bin = Dir('bin'); self.d //",
"i.gen(to, depth + 0) if self.nest and self.value is not",
"gen(self, to, depth=0): ret = S(f'class {self}:', pfx='') // 'pass'",
"(S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\":",
"= File('apt', '.txt'); self.d // self.apt self.apt \\ // 'git",
"self.tasks def task(clazz, cmd): return (S('{', '},') // f'\"label\": \"{clazz}:",
"spec=''): if not spec: return self.val() raise TypeError(['__format__', spec]) def",
"is not None: ret += f'{to.tab*depth}{self.sfx}\\n' return ret class Sec(S):",
"= jsonFile('settings'); self.vscode // self.settings # def multi(key, cmd): return",
"task('git', 'dev') // task('git', 'shadow') )) def src(self): self.py() self.test()",
"f'## {self.TITLE}' self.readme \\ // '' // self.COPYRIGHT // ''",
"'' if self.pfx is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if",
"__iter__(self): return iter(self.nest) def __floordiv__(self, that): self.nest.append(self.box(that)); return self class",
"6.0.3' # self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src self.mk.src",
"cycle=[], depth=0, prefix='', test=False): # head def pad(depth): return '\\n'",
"// self.config self.config \\ // f\"{'SECURE_KEY':<11} = {os.urandom(0x22)}\" \\ //",
"// 'MERGE += .vscode bin doc lib src tmp' self.mk.merge",
"// '.PHONY: install update' self.mk.install \\ // (S('install: $(OS)_install doc')",
"self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src self.mk.src \\ //",
"ret = '' if self.pfx is not None: ret +=",
"']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D",
"pad(depth): return '\\n' + '\\t' * depth ret = pad(depth)",
"\\\\u000D\"}}}}' ))) self.multi = \\ (Sec('multi') // (S('\"multiCommand.commands\": [', '],')",
"// self.multi // self.files // self.editor) def tasks(self): self.tasks =",
"isinstance(that, Object): return that if isinstance(that, str): return S(that) raise",
"'$(MAKE) test' // '$(PY) $(MODULE).py' // '$(PEP) --ignore=$(PEPS) --in-place $?')",
"'},'))) self.files = (Sec('files', pfx='') // self.exclude // self.watcher //",
"// 'git checkout $@' // 'git checkout $(SHADOW) -- $(MERGE)'",
"= Sec('package', pfx=''); self.mk // self.mk.package self.mk.package \\ // f'SYSLINUX_VER",
"sync(self): with open(self.path, 'w') as F: F.write(self.top.gen(self)) for i in",
"self.mk.merge \\ // 'SHADOW ?= ponymuck' self.mk.merge \\ // 'MERGE",
"self.apt \\ // 'git make curl' // 'code meld' \\",
"f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi = \\ (Sec('multi')",
"= (Sec('files', pfx='') // self.exclude // self.watcher // self.assoc) #",
"// 'SHADOW ?= ponymuck' self.mk.merge \\ // 'MERGE = Makefile",
"self.mk.tool \\ // f'CURL = curl -L -o' \\ //",
"'\\n' return ret class IO(Object): def __init__(self, V): super().__init__(V) self.path",
"comment='#'): super().__init__(V + ext) self.top = Sec(); self.bot = Sec()",
"tab='\\t') class pyFile(File): def __init__(self, V, ext='.py'): super().__init__(V, ext) class",
"FileExistsError: pass for i in self: i.sync() class File(IO): def",
"(S('\"files.exclude\": {', '},') // self.files)) self.watcher = \\ (Sec() //",
"self.test() self.config() def config(self): self.config = pyFile('config'); self.d // self.config",
"+ ext) self.top = Sec(); self.bot = Sec() self.tab =",
"self.EMAIL = '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR = 2020 self.LICENSE",
"Meta, Class, Project]: self.py // Class(i) self.py // Class(Primitive, [Object])",
"'\\n' + '\\t' * depth ret = pad(depth) + self.head(prefix,",
"in self: F.write(i.gen(self)) F.write(self.bot.gen(self)) class giti(File): def __init__(self, V='.gitignore'): super().__init__(V)",
"+ 1) if self.end is not None: ret += f'{to.tab*depth}{self.end}\\n'",
"self.mk() self.src() self.vscode() self.apt() def apt(self): self.apt = File('apt', '.txt');",
"task(clazz, cmd): return (S('{', '},') // f'\"label\": \"{clazz}: {cmd}\",' //",
"= Sec('dir', pfx=''); self.mk // self.mk.dir self.mk.dir \\ // f'{\"CWD\":<11}",
"= 'All rights reserved' self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR}",
"$(Y)', pfx='\\n.PHONY: test') // '$(PYT) test_$(MODULE).py') # self.mk.rule = Sec('rule',",
"// 'git checkout $(SHADOW) -- $(MERGE)' ) self.mk.merge \\ //",
"prefix='', test=False): gid = '' if test else f' @{id(self):x}'",
"self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all self.mk.all \\ //",
"if self.sfx is not None: ret += f'{to.tab*depth}{self.sfx}\\n' if self.pfx",
"self.tasks() def settings(self): self.settings = jsonFile('settings'); self.vscode // self.settings #",
"(Sec() // f'\"{self}/**\":true,' ) self.exclude = \\ (Sec() // (S('\"files.exclude\":",
"test=False): gid = '' if test else f' @{id(self):x}' return",
"// (S('{\"command\": \"workbench.action.terminal.sendSequence\",') // f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' )))",
"f'from {self} import *' \\ // 'def test_any(): assert True'",
"# self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg self.mk.cfg \\",
"open(self.path, 'w') as F: F.write(self.top.gen(self)) for i in self: F.write(i.gen(self))",
"tasks(self): self.tasks = jsonFile('tasks'); self.vscode // self.tasks def task(clazz, cmd):",
"= Makefile .gitignore README.md apt.txt $(S)' \\ // 'MERGE +=",
"if self.end is not None: ret += f'{to.tab*depth}{self.end}\\n' if self.sfx",
"// self.readme self.readme \\ // f'#  `{self}`' // f'##",
"// 'import pytest' \\ // f'from {self} import *' \\",
"\"{clazz}: {cmd}\",' // f'\"type\": \"shell\",' // f'\"command\": \"make {cmd}\",' //",
"// self.mk.merge self.mk.merge \\ // 'SHADOW ?= ponymuck' self.mk.merge \\",
"'w') as F: F.write(self.top.gen(self)) for i in self: F.write(i.gen(self)) F.write(self.bot.gen(self))",
"$(shell uname -s)' \\ // f'{\"CORES\":<11} = $(shell grep processor",
"self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool self.mk.tool \\ //",
"// f'!{self}' class Makefile(File): def __init__(self, V='Makefile'): super().__init__(V, tab='\\t') class",
"__init__(self, V='Makefile'): super().__init__(V, tab='\\t') class pyFile(File): def __init__(self, V, ext='.py'):",
"// multi('f11', 'make meta') // multi('f12', 'make all') )) #",
"self.nest and self.value is not None: ret += f'{to.tab*depth}{to.comment} /",
"f'{self}' self.ABOUT = about self.AUTHOR = '<NAME>' self.EMAIL = '<EMAIL>'",
"// f'\"args\": {{\"text\": \"\\\\u000D {cmd} \\\\u000D\"}}}}' ))) self.multi = \\",
"= $(CWD)/src' \\ // f'{\"TMP\":<11} = $(CWD)/tmp' # self.mk.tool =",
"self.editor = (Sec('editor', pfx='') // '\"editor.tabSize\": 4,' // '\"editor.rulers\": [80],'",
"f'{self.path}/{that.path}' return super().__floordiv__(that) def sync(self): try: os.mkdir(self.path) except FileExistsError: pass",
"def __init__(self, V='.gitignore'): super().__init__(V) self.bot // f'!{self}' class Makefile(File): def",
"// (S('\"files.associations\": {', '},'))) self.files = (Sec('files', pfx='') // self.exclude",
"'import pytest' \\ // f'from {self} import *' \\ //",
"ret += f'{to.tab*depth}{self.pfx}\\n' if self.pfx else '\\n' if self.nest and",
"sup=[]): assert callable(C) super().__init__(C.__name__) self.clazz = C; self.sup = sup",
"string def test(self): return self.dump(test=True) def __repr__(self): return self.dump(test=False) def",
"test_$(MODULE).py' \\ // f'P += config.py' \\ // f'S +=",
"rights reserved' self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}' #",
"super().__init__(V + ext) self.top = Sec(); self.bot = Sec() self.tab",
"self.end = end; self.pfx = pfx; self.sfx = sfx def",
"in self: ret += i.gen(to, depth + 1) if self.end",
"pfx=''); self.mk // self.mk.cfg self.mk.cfg \\ // f'PEPS = E26,E302,E305,E401,E402,E701,E702'",
"doc') // '$(MAKE) test' ) self.mk.install \\ // (S('update: $(OS)_update",
"f'{to.tab*depth}{self.pfx}\\n' if self.value is not None: ret += f'{to.tab*depth}{self.value}\\n' for",
"self.readme = File('README', '.md'); self.d // self.readme self.readme \\ //",
"self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}' self.readme // self.ABOUT Project(",
"is not None: ret += f'{to.tab*depth}{self.value}\\n' for i in self:",
"self.mk.doc self.mk.doc \\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc') self.mk.doc \\",
"ext, comment=comment) class Meta(Object): pass class Class(Meta): def __init__(self, C,",
"\\ // f\"{'PORT':<11} = 12345\" def py(self): self.py = pyFile(f'{self}');",
"self.src() self.vscode() self.apt() def apt(self): self.apt = File('apt', '.txt'); self.d",
"$(Y)', pfx='.PHONY: meta') // '$(MAKE) test' // '$(PY) $(MODULE).py' //",
"// (S('install: $(OS)_install doc') // '$(MAKE) test' ) self.mk.install \\",
"# def multi(key, cmd): return (S('{', '},') // f'\"command\": \"multiCommand.{key}\",'",
"\\ // f'Y += $(MODULE).py test_$(MODULE).py' \\ // f'P +=",
"__init__(self, V): super().__init__(V) self.path = V class Dir(IO): def __floordiv__(self,",
"\\ (Sec() // (S('\"files.watcherExclude\": {', '},') // self.files)) self.assoc =",
"is not None: ret += f'{to.tab*depth}{self.pfx}\\n' if self.value is not",
"{', '},') // self.files)) self.assoc = \\ (Sec() // (S('\"files.associations\":",
"\\ // 'git make curl' // 'code meld' \\ //",
"not V: V = os.getcwd().split('/')[-1] super().__init__(V) # self.TITLE = title",
"// self.files)) self.assoc = \\ (Sec() // (S('\"files.associations\": {', '},')))",
"test' ) self.mk.install \\ // (S('update: $(OS)_update doc') // '$(MAKE)",
"f'PY = $(shell which python3)' \\ // f'PYT = $(shell",
"// self.mk.var self.mk.var \\ // f'{\"MODULE\":<11} = $(notdir $(CURDIR))' \\",
"Primitive(Object): pass class S(Primitive): def __init__(self, V=None, end=None, pfx=None, sfx=None):",
"src(self): self.py() self.test() self.config() def config(self): self.config = pyFile('config'); self.d",
"// (S('Linux_install Linux_update:', pfx='.PHONY: Linux_install Linux_update') // 'sudo apt update'",
"and self.value is not None: ret += f'{to.tab*depth}{to.comment} \\\\ {self}\\n'",
"# self.TITLE = title if title else f'{self}' self.ABOUT =",
"= '<NAME>' self.EMAIL = '<EMAIL>' self.GITHUB = 'https://github.com/ponyatov' self.YEAR =",
"12345\" def py(self): self.py = pyFile(f'{self}'); self.d // self.py self.py",
"+= $(MODULE).py test_$(MODULE).py' \\ // f'P += config.py' \\ //",
"f'Y += $(MODULE).py test_$(MODULE).py' \\ // f'P += config.py' \\",
"pfx=''); self.mk // self.mk.doc self.mk.doc \\ // S('doc: doc/pyMorphic.pdf', pfx='.PHONY:",
"\\ // (S('dev:', pfx='\\n.PHONY: dev') // 'git push -v' //",
"self.config() def config(self): self.config = pyFile('config'); self.d // self.config self.config",
"sys class Object: ## @name constructor def __init__(self, V): self.value",
"= $(shell uname -s)' \\ // f'{\"CORES\":<11} = $(shell grep",
"// self.bin def mk(self): self.mk = Makefile(); self.d // self.mk",
"install -u `cat apt.txt`') # self.mk.merge = Sec('merge', pfx=''); self.mk",
"self.vscode self.settings() self.tasks() def settings(self): self.settings = jsonFile('settings'); self.vscode //",
"self.d // self.mk # self.mk.var = Sec('var', pfx=''); self.mk //",
"pad(depth) + self.head(prefix, test) # subtree return ret def head(self,",
"\"multiCommand.{key}\",' // (S('\"sequence\": [', ']') // '\"workbench.action.files.saveAll\",' // (S('{\"command\": \"workbench.action.terminal.sendSequence\",')",
"f'P += config.py' \\ // f'S += $(Y)' # self.mk.cfg",
"bin doc lib src tmp' self.mk.merge \\ // (S('dev:', pfx='\\n.PHONY:",
"giti(); self.d // self.giti self.giti.top // '*~' // '*.swp' //",
"{self} import *' \\ // 'def test_any(): assert True' def",
"= Sec('src', pfx=''); self.mk // self.mk.src self.mk.src \\ // f'Y",
"32,' ) # self.settings \\ // (S('{', '}') // self.multi",
"self class Primitive(Object): pass class S(Primitive): def __init__(self, V=None, end=None,",
"self.py self.py \\ // 'import os, sys' for i in",
"*' \\ // 'def test_any(): assert True' def dirs(self): self.d",
"// (S('{', '}') // self.multi // self.files // self.editor) def",
"F.write(self.bot.gen(self)) class giti(File): def __init__(self, V='.gitignore'): super().__init__(V) self.bot // f'!{self}'",
"self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install self.mk.install // '.PHONY:",
"comment=comment) class Meta(Object): pass class Class(Meta): def __init__(self, C, sup=[]):",
"doc') // '$(MAKE) test' ) self.mk.install \\ // (S('Linux_install Linux_update:',"
] |
[
"prob > 0.01: record = {\"value\": classes[idx], \"prob\": prob} results.append(record)",
"= data['text'] predict_class, predict_idx, predict_values = model.predict(text) results = []",
"allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst = hash_type()",
"else: logging.info(\"File %s already exists will reuse md5sum: %s\", model_file,",
"setup_learner(): model_file = path.parent / 'models' / f'{model_file_name}.pkl' if not",
"using fastai's load_learner method model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes =",
"sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST']) async def analyze(request): data =",
"return JSONResponse(results[:5]) if __name__ == '__main__': if 'serve' in sys.argv:",
"'wb') as f: f.write(data) async def setup_learner(): model_file = path.parent",
"model, classes loop = asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model, classes",
"results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if __name__ == '__main__': if 'serve'",
"logging import aiohttp import uvicorn from fastai.vision import * from",
"hex else hashinst.digest() async def download_file(url, dest): if dest.exists(): return",
"enumerate(predict_values): prob = val.item() if prob > 0.01: record =",
"= model.predict(text) results = [] for idx, val in enumerate(predict_values):",
"* 128), b''): hashinst.update(chunk) return hashinst.hexdigest() if hex else hashinst.digest()",
"CORSMiddleware from starlette.responses import JSONResponse # put your url here",
"classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST'])",
"f.write(data) async def setup_learner(): model_file = path.parent / 'models' /",
"# put your url here here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name",
"md5sum: %s\", hashsum(model_file)) else: logging.info(\"File %s already exists will reuse",
"starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse # put your",
"0.01: record = {\"value\": classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True)",
"= Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) app",
"level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'],",
"file %s from %s\", model_file, model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded",
"b''): hashinst.update(chunk) return hashinst.hexdigest() if hex else hashinst.digest() async def",
"here here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path =",
"session: async with session.get(url) as response: data = await response.read()",
"url here here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path",
"for chunk in iter(lambda: f.read(hashinst.block_size * 128), b''): hashinst.update(chunk) return",
"import uvicorn from fastai.vision import * from starlette.applications import Starlette",
"= 'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger =",
"@app.route('/predict', methods=['POST']) async def analyze(request): data = await request.form() text",
"val.item() if prob > 0.01: record = {\"value\": classes[idx], \"prob\":",
"classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if __name__",
"'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger = logging.getLogger()",
"%s\", hashsum(model_file)) else: logging.info(\"File %s already exists will reuse md5sum:",
"logging.info(\"File %s already exists will reuse md5sum: %s\", model_file, hashsum(model_file))",
"dest): if dest.exists(): return async with aiohttp.ClientSession() as session: async",
"f: f.write(data) async def setup_learner(): model_file = path.parent / 'models'",
"import * from starlette.applications import Starlette from starlette.middleware.cors import CORSMiddleware",
"md5sum: %s\", model_file, hashsum(model_file)) # Loading the saved model using",
"= load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes return model, classes loop",
"async def analyze(request): data = await request.form() text = data['text']",
"'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst = hash_type() with open(path,",
"as f: f.write(data) async def setup_learner(): model_file = path.parent /",
"= path.parent / 'models' / f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will",
"model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return val[\"prob\"] @app.route('/predict',",
"= {\"value\": classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5])",
"async def setup_learner(): model_file = path.parent / 'models' / f'{model_file_name}.pkl'",
"iter(lambda: f.read(hashinst.block_size * 128), b''): hashinst.update(chunk) return hashinst.hexdigest() if hex",
"import Starlette from starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse",
"hashsum(model_file)) else: logging.info(\"File %s already exists will reuse md5sum: %s\",",
"will reuse md5sum: %s\", model_file, hashsum(model_file)) # Loading the saved",
"reverse=True) return JSONResponse(results[:5]) if __name__ == '__main__': if 'serve' in",
"f'{model_file_name}.pkl') classes = model.data.classes return model, classes loop = asyncio.get_event_loop()",
"analyze(request): data = await request.form() text = data['text'] predict_class, predict_idx,",
"async def download_file(url, dest): if dest.exists(): return async with aiohttp.ClientSession()",
"return val[\"prob\"] @app.route('/predict', methods=['POST']) async def analyze(request): data = await",
"def analyze(request): data = await request.form() text = data['text'] predict_class,",
"loop.close() def sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST']) async def analyze(request):",
"Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst",
"with open(dest, 'wb') as f: f.write(data) async def setup_learner(): model_file",
"chunk in iter(lambda: f.read(hashinst.block_size * 128), b''): hashinst.update(chunk) return hashinst.hexdigest()",
"app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst =",
"hash_type() with open(path, 'rb') as f: for chunk in iter(lambda:",
"def sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST']) async def analyze(request): data",
"predict_idx, predict_values = model.predict(text) results = [] for idx, val",
"model.predict(text) results = [] for idx, val in enumerate(predict_values): prob",
"starlette.responses import JSONResponse # put your url here here model_file_url",
"saved model using fastai's load_learner method model = load_learner(model_file.parent, f'{model_file_name}.pkl')",
"= val.item() if prob > 0.01: record = {\"value\": classes[idx],",
"= hash_type() with open(path, 'rb') as f: for chunk in",
"prob} results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if __name__ == '__main__':",
"model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file)) else:",
"exists will reuse md5sum: %s\", model_file, hashsum(model_file)) # Loading the",
"starlette.applications import Starlette from starlette.middleware.cors import CORSMiddleware from starlette.responses import",
"fastai.vision import * from starlette.applications import Starlette from starlette.middleware.cors import",
"logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path,",
"app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True,",
"__name__ == '__main__': if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0' port=4000)",
"%s\", model_file, hashsum(model_file)) # Loading the saved model using fastai's",
"from fastai.vision import * from starlette.applications import Starlette from starlette.middleware.cors",
"128), b''): hashinst.update(chunk) return hashinst.hexdigest() if hex else hashinst.digest() async",
"model_file.exists(): logging.info(\"Will download file %s from %s\", model_file, model_file_url) await",
"logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file)) else: logging.info(\"File %s already exists",
"'rb') as f: for chunk in iter(lambda: f.read(hashinst.block_size * 128),",
"dest.exists(): return async with aiohttp.ClientSession() as session: async with session.get(url)",
"model_file, hashsum(model_file)) # Loading the saved model using fastai's load_learner",
"else hashinst.digest() async def download_file(url, dest): if dest.exists(): return async",
"path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO)",
"hashinst.update(chunk) return hashinst.hexdigest() if hex else hashinst.digest() async def download_file(url,",
"as f: for chunk in iter(lambda: f.read(hashinst.block_size * 128), b''):",
"model_file = path.parent / 'models' / f'{model_file_name}.pkl' if not model_file.exists():",
"as session: async with session.get(url) as response: data = await",
"prob = val.item() if prob > 0.01: record = {\"value\":",
"with open(path, 'rb') as f: for chunk in iter(lambda: f.read(hashinst.block_size",
"text = data['text'] predict_class, predict_idx, predict_values = model.predict(text) results =",
"idx, val in enumerate(predict_values): prob = val.item() if prob >",
"results = [] for idx, val in enumerate(predict_values): prob =",
"= 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\",",
"logging.info(\"Will download file %s from %s\", model_file, model_file_url) await download_file(model_file_url,",
"results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if __name__ == '__main__': if",
"f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will download file %s from %s\",",
"await request.form() text = data['text'] predict_class, predict_idx, predict_values = model.predict(text)",
"model using fastai's load_learner method model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes",
"hashinst.hexdigest() if hex else hashinst.digest() async def download_file(url, dest): if",
"as response: data = await response.read() with open(dest, 'wb') as",
"[asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return val[\"prob\"]",
"uvicorn from fastai.vision import * from starlette.applications import Starlette from",
"request.form() text = data['text'] predict_class, predict_idx, predict_values = model.predict(text) results",
"from starlette.responses import JSONResponse # put your url here here",
"data = await request.form() text = data['text'] predict_class, predict_idx, predict_values",
"def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst = hash_type() with open(path, 'rb')",
"response: data = await response.read() with open(dest, 'wb') as f:",
"return hashinst.hexdigest() if hex else hashinst.digest() async def download_file(url, dest):",
"hash_type=hashlib.md5): hashinst = hash_type() with open(path, 'rb') as f: for",
"val in enumerate(predict_values): prob = val.item() if prob > 0.01:",
"aiohttp.ClientSession() as session: async with session.get(url) as response: data =",
"loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST']) async def",
"file md5sum: %s\", hashsum(model_file)) else: logging.info(\"File %s already exists will",
"asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def",
"def setup_learner(): model_file = path.parent / 'models' / f'{model_file_name}.pkl' if",
"open(path, 'rb') as f: for chunk in iter(lambda: f.read(hashinst.block_size *",
"method model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes return model,",
"session.get(url) as response: data = await response.read() with open(dest, 'wb')",
"from starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse # put",
"if prob > 0.01: record = {\"value\": classes[idx], \"prob\": prob}",
"hashinst = hash_type() with open(path, 'rb') as f: for chunk",
"data = await response.read() with open(dest, 'wb') as f: f.write(data)",
"open(dest, 'wb') as f: f.write(data) async def setup_learner(): model_file =",
"reuse md5sum: %s\", model_file, hashsum(model_file)) # Loading the saved model",
"allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5): hashinst = hash_type() with",
"in iter(lambda: f.read(hashinst.block_size * 128), b''): hashinst.update(chunk) return hashinst.hexdigest() if",
"return async with aiohttp.ClientSession() as session: async with session.get(url) as",
"the saved model using fastai's load_learner method model = load_learner(model_file.parent,",
"load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes return model, classes loop =",
"logging.getLogger() logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def",
"async with session.get(url) as response: data = await response.read() with",
"model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s:",
"in enumerate(predict_values): prob = val.item() if prob > 0.01: record",
"import logging import aiohttp import uvicorn from fastai.vision import *",
"/ 'models' / f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will download file",
"# Loading the saved model using fastai's load_learner method model",
"tasks = [asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val):",
"if dest.exists(): return async with aiohttp.ClientSession() as session: async with",
"put your url here here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name =",
"methods=['POST']) async def analyze(request): data = await request.form() text =",
"f: for chunk in iter(lambda: f.read(hashinst.block_size * 128), b''): hashinst.update(chunk)",
"asyncio import logging import aiohttp import uvicorn from fastai.vision import",
"hashsum(path, hex=True, hash_type=hashlib.md5): hashinst = hash_type() with open(path, 'rb') as",
"import JSONResponse # put your url here here model_file_url =",
"Starlette from starlette.middleware.cors import CORSMiddleware from starlette.responses import JSONResponse #",
"response.read() with open(dest, 'wb') as f: f.write(data) async def setup_learner():",
"fastai's load_learner method model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes",
"hashinst.digest() async def download_file(url, dest): if dest.exists(): return async with",
"await download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file)) else: logging.info(\"File",
"> 0.01: record = {\"value\": classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb,",
"return model, classes loop = asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model,",
"/ f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will download file %s from",
"'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO)",
"from %s\", model_file, model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum:",
"Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) app =",
"download_file(url, dest): if dest.exists(): return async with aiohttp.ClientSession() as session:",
"JSONResponse(results[:5]) if __name__ == '__main__': if 'serve' in sys.argv: uvicorn.run(app,",
"loop = asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0]",
"model_file) logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file)) else: logging.info(\"File %s already",
"model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes return model, classes",
"%s\", model_file, model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum: %s\",",
"f.read(hashinst.block_size * 128), b''): hashinst.update(chunk) return hashinst.hexdigest() if hex else",
"{\"value\": classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if",
"download file %s from %s\", model_file, model_file_url) await download_file(model_file_url, model_file)",
"here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model' path = Path(__file__).parent",
"= logging.getLogger() logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])",
"[] for idx, val in enumerate(predict_values): prob = val.item() if",
"= loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return val[\"prob\"] @app.route('/predict', methods=['POST']) async",
"already exists will reuse md5sum: %s\", model_file, hashsum(model_file)) # Loading",
"with session.get(url) as response: data = await response.read() with open(dest,",
"download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file)) else: logging.info(\"File %s",
"await response.read() with open(dest, 'wb') as f: f.write(data) async def",
"%s already exists will reuse md5sum: %s\", model_file, hashsum(model_file)) #",
"predict_class, predict_idx, predict_values = model.predict(text) results = [] for idx,",
"for idx, val in enumerate(predict_values): prob = val.item() if prob",
"= await response.read() with open(dest, 'wb') as f: f.write(data) async",
"import asyncio import logging import aiohttp import uvicorn from fastai.vision",
"logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) app = Starlette()",
"model_file_name = 'model' path = Path(__file__).parent logging.basicConfig(format=\"%(levelname)s: %(message)s\", level=logging.INFO) logger",
"hashsum(model_file)) # Loading the saved model using fastai's load_learner method",
"* from starlette.applications import Starlette from starlette.middleware.cors import CORSMiddleware from",
"aiohttp import uvicorn from fastai.vision import * from starlette.applications import",
"data['text'] predict_class, predict_idx, predict_values = model.predict(text) results = [] for",
"val[\"prob\"] @app.route('/predict', methods=['POST']) async def analyze(request): data = await request.form()",
"from starlette.applications import Starlette from starlette.middleware.cors import CORSMiddleware from starlette.responses",
"\"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True) return JSONResponse(results[:5]) if __name__ ==",
"async with aiohttp.ClientSession() as session: async with session.get(url) as response:",
"with aiohttp.ClientSession() as session: async with session.get(url) as response: data",
"= asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close()",
"predict_values = model.predict(text) results = [] for idx, val in",
"record = {\"value\": classes[idx], \"prob\": prob} results.append(record) results.sort(key=sortByProb, reverse=True) return",
"= [asyncio.ensure_future(setup_learner())] model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0] loop.close() def sortByProb(val): return",
"model_file, model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded file md5sum: %s\", hashsum(model_file))",
"hex=True, hash_type=hashlib.md5): hashinst = hash_type() with open(path, 'rb') as f:",
"Loading the saved model using fastai's load_learner method model =",
"= model.data.classes return model, classes loop = asyncio.get_event_loop() tasks =",
"your url here here model_file_url = 'https://www.dropbox.com/s/...?raw=1' model_file_name = 'model'",
"path.parent / 'models' / f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will download",
"= Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type']) def hashsum(path, hex=True, hash_type=hashlib.md5):",
"if not model_file.exists(): logging.info(\"Will download file %s from %s\", model_file,",
"= await request.form() text = data['text'] predict_class, predict_idx, predict_values =",
"%(message)s\", level=logging.INFO) logger = logging.getLogger() logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware,",
"if hex else hashinst.digest() async def download_file(url, dest): if dest.exists():",
"def download_file(url, dest): if dest.exists(): return async with aiohttp.ClientSession() as",
"not model_file.exists(): logging.info(\"Will download file %s from %s\", model_file, model_file_url)",
"import aiohttp import uvicorn from fastai.vision import * from starlette.applications",
"if __name__ == '__main__': if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0'",
"load_learner method model = load_learner(model_file.parent, f'{model_file_name}.pkl') classes = model.data.classes return",
"= [] for idx, val in enumerate(predict_values): prob = val.item()",
"classes = model.data.classes return model, classes loop = asyncio.get_event_loop() tasks",
"logger = logging.getLogger() logger.setLevel(logging.INFO) app = Starlette() app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With',",
"'models' / f'{model_file_name}.pkl' if not model_file.exists(): logging.info(\"Will download file %s",
"%s from %s\", model_file, model_file_url) await download_file(model_file_url, model_file) logging.info(\"Downloaded file",
"import CORSMiddleware from starlette.responses import JSONResponse # put your url",
"classes loop = asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())] model, classes =",
"JSONResponse # put your url here here model_file_url = 'https://www.dropbox.com/s/...?raw=1'",
"model.data.classes return model, classes loop = asyncio.get_event_loop() tasks = [asyncio.ensure_future(setup_learner())]"
] |
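A minimal client sketch for the endpoint above, assuming the server was started with `serve` and is listening on localhost:4000; the `requests` dependency and the sample text are assumptions, not part of the listing:

import requests

# POST form data, matching the server's `await request.form()` read.
resp = requests.post('http://localhost:4000/predict',
                     data={'text': 'some input text'})  # sample text (assumption)
for record in resp.json():  # at most 5 records with prob > 0.01
    print(record['value'], record['prob'])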
[
"to receive\") while True: connectionSocket,addr = serverSocket.accept() sentence = connectionSocket.recv(1024).decode()",
"connectionSocket,addr = serverSocket.accept() sentence = connectionSocket.recv(1024).decode() sentence = sentence.upper() connectionSocket.send(sentence.encode())",
"socket import * serverPort = 12001 serverSocket = socket(AF_INET, SOCK_STREAM)",
"print(\"the server is ready to receive\") while True: connectionSocket,addr =",
"from socket import * serverPort = 12001 serverSocket = socket(AF_INET,",
"server is ready to receive\") while True: connectionSocket,addr = serverSocket.accept()",
"socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server is ready to",
"while True: connectionSocket,addr = serverSocket.accept() sentence = connectionSocket.recv(1024).decode() sentence =",
"= 12001 serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the",
"import * serverPort = 12001 serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('',",
"SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server is ready to receive\")",
"serverSocket.listen(1) print(\"the server is ready to receive\") while True: connectionSocket,addr",
"= socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server is ready",
"serverPort)) serverSocket.listen(1) print(\"the server is ready to receive\") while True:",
"12001 serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server",
"receive\") while True: connectionSocket,addr = serverSocket.accept() sentence = connectionSocket.recv(1024).decode() sentence",
"serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server is ready to receive\") while",
"* serverPort = 12001 serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort))",
"serverPort = 12001 serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1)",
"serverSocket = socket(AF_INET, SOCK_STREAM) serverSocket.bind(('', serverPort)) serverSocket.listen(1) print(\"the server is",
"True: connectionSocket,addr = serverSocket.accept() sentence = connectionSocket.recv(1024).decode() sentence = sentence.upper()",
"is ready to receive\") while True: connectionSocket,addr = serverSocket.accept() sentence",
"= serverSocket.accept() sentence = connectionSocket.recv(1024).decode() sentence = sentence.upper() connectionSocket.send(sentence.encode()) connectionSocket.close()",
"ready to receive\") while True: connectionSocket,addr = serverSocket.accept() sentence ="
] |
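The matching client does not appear in this listing; a minimal sketch under the assumption that the server runs on the same machine and the same port 12001:

from socket import *

serverName = 'localhost'  # assumption: server host
serverPort = 12001
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
sentence = input('Input lowercase sentence: ')
clientSocket.send(sentence.encode())
# The server replies with the uppercased sentence.
modifiedSentence = clientSocket.recv(1024)
print('From Server:', modifiedSentence.decode())
clientSocket.close()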
[
"class has no additional functionality. Args: None. Returns: True. \"\"\"",
"self._parsed_job_work_uri = parsed_job_work_uri def initialize(self): \"\"\" Initialize the LocalWorkflow class.",
"the Local Workflow objects. \"\"\" def __init__( self, job, config,",
"the LocalWorkflow class. This workflow class has no additional functionality.",
"return True def get_context_options(self): \"\"\" Return dict of options specific",
"\"\"\" Initialize the LocalWorkflow class. This workflow class has no",
"Returns: True. \"\"\" return True def init_data(self): \"\"\" Initialize any",
"no additional functionality. Args: None. Returns: True. \"\"\" return True",
"class. This workflow class has no additional functionality. Args: None.",
"True def init_data(self): \"\"\" Initialize any data specific to this",
"for this context. Args: None. Returns: {} - no options",
"get_context_options(self): \"\"\" Return dict of options specific for this context.",
"= config self._parsed_job_work_uri = parsed_job_work_uri def initialize(self): \"\"\" Initialize the",
"config, parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow class. \"\"\" self._job =",
"LocalWorkflow class. \"\"\" self._job = job self._config = config self._parsed_job_work_uri",
"functionality. Args: None. Returns: True. \"\"\" return True def init_data(self):",
"that represents the Local Workflow objects. \"\"\" def __init__( self,",
"this context. Args: None. Returns: {} - no options specific",
"job, config, parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow class. \"\"\" self._job",
"GeneFlow LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\" A class that represents",
"objects. \"\"\" def __init__( self, job, config, parsed_job_work_uri ): \"\"\"",
"init_data(self): \"\"\" Initialize any data specific to this context. \"\"\"",
"parsed_job_work_uri def initialize(self): \"\"\" Initialize the LocalWorkflow class. This workflow",
"): \"\"\" Instantiate LocalWorkflow class. \"\"\" self._job = job self._config",
"Workflow objects. \"\"\" def __init__( self, job, config, parsed_job_work_uri ):",
"\"\"\" def __init__( self, job, config, parsed_job_work_uri ): \"\"\" Instantiate",
"self._job = job self._config = config self._parsed_job_work_uri = parsed_job_work_uri def",
"return True def init_data(self): \"\"\" Initialize any data specific to",
"\"\"\" Return dict of options specific for this context. Args:",
"module contains the GeneFlow LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\" A",
"Initialize any data specific to this context. \"\"\" return True",
"the GeneFlow LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\" A class that",
"def init_data(self): \"\"\" Initialize any data specific to this context.",
"additional functionality. Args: None. Returns: True. \"\"\" return True def",
"represents the Local Workflow objects. \"\"\" def __init__( self, job,",
"context. \"\"\" return True def get_context_options(self): \"\"\" Return dict of",
"\"\"\" Initialize any data specific to this context. \"\"\" return",
"class LocalWorkflow: \"\"\" A class that represents the Local Workflow",
"= job self._config = config self._parsed_job_work_uri = parsed_job_work_uri def initialize(self):",
"- no options specific for this context. \"\"\" return {}",
"workflow class has no additional functionality. Args: None. Returns: True.",
"Args: None. Returns: {} - no options specific for this",
"LocalWorkflow class. This workflow class has no additional functionality. Args:",
"A class that represents the Local Workflow objects. \"\"\" def",
"\"\"\" return True def init_data(self): \"\"\" Initialize any data specific",
"this context. \"\"\" return True def get_context_options(self): \"\"\" Return dict",
"\"\"\"This module contains the GeneFlow LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\"",
"Local Workflow objects. \"\"\" def __init__( self, job, config, parsed_job_work_uri",
"class that represents the Local Workflow objects. \"\"\" def __init__(",
"True def get_context_options(self): \"\"\" Return dict of options specific for",
"config self._parsed_job_work_uri = parsed_job_work_uri def initialize(self): \"\"\" Initialize the LocalWorkflow",
"\"\"\" A class that represents the Local Workflow objects. \"\"\"",
"to this context. \"\"\" return True def get_context_options(self): \"\"\" Return",
"dict of options specific for this context. Args: None. Returns:",
"\"\"\" Instantiate LocalWorkflow class. \"\"\" self._job = job self._config =",
"initialize(self): \"\"\" Initialize the LocalWorkflow class. This workflow class has",
"= parsed_job_work_uri def initialize(self): \"\"\" Initialize the LocalWorkflow class. This",
"any data specific to this context. \"\"\" return True def",
"of options specific for this context. Args: None. Returns: {}",
"self._config = config self._parsed_job_work_uri = parsed_job_work_uri def initialize(self): \"\"\" Initialize",
"context. Args: None. Returns: {} - no options specific for",
"{} - no options specific for this context. \"\"\" return",
"\"\"\" self._job = job self._config = config self._parsed_job_work_uri = parsed_job_work_uri",
"self, job, config, parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow class. \"\"\"",
"Initialize the LocalWorkflow class. This workflow class has no additional",
"parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow class. \"\"\" self._job = job",
"options specific for this context. Args: None. Returns: {} -",
"Returns: {} - no options specific for this context. \"\"\"",
"data specific to this context. \"\"\" return True def get_context_options(self):",
"def __init__( self, job, config, parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow",
"specific to this context. \"\"\" return True def get_context_options(self): \"\"\"",
"None. Returns: {} - no options specific for this context.",
"None. Returns: True. \"\"\" return True def init_data(self): \"\"\" Initialize",
"contains the GeneFlow LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\" A class",
"LocalWorkflow class.\"\"\" class LocalWorkflow: \"\"\" A class that represents the",
"specific for this context. Args: None. Returns: {} - no",
"def initialize(self): \"\"\" Initialize the LocalWorkflow class. This workflow class",
"__init__( self, job, config, parsed_job_work_uri ): \"\"\" Instantiate LocalWorkflow class.",
"True. \"\"\" return True def init_data(self): \"\"\" Initialize any data",
"Return dict of options specific for this context. Args: None.",
"has no additional functionality. Args: None. Returns: True. \"\"\" return",
"class.\"\"\" class LocalWorkflow: \"\"\" A class that represents the Local",
"Args: None. Returns: True. \"\"\" return True def init_data(self): \"\"\"",
"\"\"\" return True def get_context_options(self): \"\"\" Return dict of options",
"Instantiate LocalWorkflow class. \"\"\" self._job = job self._config = config",
"job self._config = config self._parsed_job_work_uri = parsed_job_work_uri def initialize(self): \"\"\"",
"class. \"\"\" self._job = job self._config = config self._parsed_job_work_uri =",
"LocalWorkflow: \"\"\" A class that represents the Local Workflow objects.",
"This workflow class has no additional functionality. Args: None. Returns:",
"def get_context_options(self): \"\"\" Return dict of options specific for this"
] |
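Since every method is a stub, the class can be exercised directly; a minimal sketch with placeholder arguments (in GeneFlow itself, job, config, and parsed_job_work_uri are supplied by the workflow engine):

# Placeholder arguments (assumptions); GeneFlow passes real job/config objects.
wf = LocalWorkflow(job={}, config={}, parsed_job_work_uri={})
assert wf.initialize() is True
assert wf.init_data() is True
assert wf.get_context_options() == {}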
[
"the learning rate of all param groups or each group",
"epoch. (default: -1) Returns: StepLR: Learning rate scheduler. \"\"\" return",
"(int): The number of epochs to train for. This is",
"is used along with epochs in order to infer the",
"(default: 0.1) patience (int, optional): Number of epoch with no",
"param groups or each group respectively. (default: 0) Returns: ReduceLROnPlateau",
"return ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr ) def one_cycle_lr(",
"\"\"\" return ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr ) def",
"Frequency for changing learning rate. gamma (float): Factor for changing",
"min_lr=0): \"\"\"Create LR plateau reduction scheduler. Args: optimizer (torch.optim): Model",
"number of steps in the cycle. steps_per_epoch (int): The number",
"reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create LR plateau reduction scheduler.",
"number of epochs to train for. This is used along",
"with steps_per_epoch in order to infer the total number of",
"epochs in order to infer the total number of steps",
"Returns: OneCycleLR instance. \"\"\" return OneCycleLR( optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch,",
"scalars. A lower bound on the learning rate of all",
"step_size (int): Frequency for changing learning rate. gamma (float): Factor",
"OneCycleLR def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR step scheduler.",
"max_lr (float): Upper learning rate boundary in the cycle. epochs",
"increasing the learning rate. (default: 0.5) div_factor (float, optional): Determines",
"cycle. pct_start (float, optional): The percentage of the cycle (in",
"optional): A scalar or a list of scalars. A lower",
"Returns: ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose,",
"optimizer. factor (float, optional): Factor by which the learning rate",
"(torch.optim): Model optimizer. factor (float, optional): Factor by which the",
"optional): Number of epoch with no improvement after which learning",
"ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr",
"of the cycle (in number of steps) spent increasing the",
"rate of all param groups or each group respectively. (default:",
"OneCycleLR instance. \"\"\" return OneCycleLR( optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch, pct_start=pct_start,",
"in the cycle. epochs (int): The number of epochs to",
"optional): The percentage of the cycle (in number of steps)",
"False) min_lr (float, optional): A scalar or a list of",
"initial learning rate via initial_lr = max_lr / div_factor. (default:",
"(default: 0) Returns: ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau( optimizer, factor=factor,",
"last_epoch=-1): \"\"\"Create LR step scheduler. Args: optimizer (torch.optim): Model optimizer.",
"index of last epoch. (default: -1) Returns: StepLR: Learning rate",
"on the learning rate of all param groups or each",
"epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One Cycle Policy",
"Number of epoch with no improvement after which learning rate",
"div_factor (float, optional): Determines the initial learning rate via initial_lr",
"be will be reduced. (default: 10) verbose (bool, optional): If",
"learning rate via min_lr = initial_lr / final_div_factor. (default: 1e4)",
"patience=10, verbose=False, min_lr=0): \"\"\"Create LR plateau reduction scheduler. Args: optimizer",
"step scheduler. Args: optimizer (torch.optim): Model optimizer. step_size (int): Frequency",
"gamma (float): Factor for changing learning rate. (default: 0.1) last_epoch",
"to train for. This is used along with epochs in",
"Factor by which the learning rate will be reduced. (default:",
"to stdout for each update. (default: False) min_lr (float, optional):",
"the cycle (in number of steps) spent increasing the learning",
"prints a message to stdout for each update. (default: False)",
"learning rate. gamma (float): Factor for changing learning rate. (default:",
"respectively. (default: 0) Returns: ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau( optimizer,",
"learning rate will be reduced. (default: 0.1) patience (int, optional):",
"steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One Cycle Policy for",
"div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One Cycle Policy for Learning Rate.",
"rate. (default: 0.1) last_epoch (int): The index of last epoch.",
"(default: 1e4) Returns: OneCycleLR instance. \"\"\" return OneCycleLR( optimizer, max_lr,",
"scheduler. \"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1,",
"One Cycle Policy for Learning Rate. Args: optimizer (torch.optim): Model",
"The index of last epoch. (default: -1) Returns: StepLR: Learning",
"a message to stdout for each update. (default: False) min_lr",
"plateau reduction scheduler. Args: optimizer (torch.optim): Model optimizer. factor (float,",
"scalar or a list of scalars. A lower bound on",
"the cycle. steps_per_epoch (int): The number of steps per epoch",
"number of steps in the cycle. pct_start (float, optional): The",
"number of steps) spent increasing the learning rate. (default: 0.5)",
"(in number of steps) spent increasing the learning rate. (default:",
"\"\"\"Create One Cycle Policy for Learning Rate. Args: optimizer (torch.optim):",
"learning rate boundary in the cycle. epochs (int): The number",
"be reduced. (default: 10) verbose (bool, optional): If True, prints",
"optional): If True, prints a message to stdout for each",
"for. This is used along with steps_per_epoch in order to",
"the initial learning rate via initial_lr = max_lr / div_factor.",
"(int): The index of last epoch. (default: -1) Returns: StepLR:",
"optional): Determines the minimum learning rate via min_lr = initial_lr",
"learning rate will be will be reduced. (default: 10) verbose",
"changing learning rate. (default: 0.1) last_epoch (int): The index of",
"of steps per epoch to train for. This is used",
"bound on the learning rate of all param groups or",
"Args: optimizer (torch.optim): Model optimizer. step_size (int): Frequency for changing",
"minimum learning rate via min_lr = initial_lr / final_div_factor. (default:",
"steps_per_epoch in order to infer the total number of steps",
"factor (float, optional): Factor by which the learning rate will",
"changing learning rate. gamma (float): Factor for changing learning rate.",
"-1) Returns: StepLR: Learning rate scheduler. \"\"\" return StepLR(optimizer, step_size=step_size,",
"min_lr = initial_lr / final_div_factor. (default: 1e4) Returns: OneCycleLR instance.",
"for. This is used along with epochs in order to",
"of steps in the cycle. pct_start (float, optional): The percentage",
"steps_per_epoch (int): The number of steps per epoch to train",
"return OneCycleLR( optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch, pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor )",
"(int, optional): Number of epoch with no improvement after which",
"0.1) patience (int, optional): Number of epoch with no improvement",
"or a list of scalars. A lower bound on the",
"to infer the total number of steps in the cycle.",
"stdout for each update. (default: False) min_lr (float, optional): A",
"reduced. (default: 10) verbose (bool, optional): If True, prints a",
"rate via min_lr = initial_lr / final_div_factor. (default: 1e4) Returns:",
"along with steps_per_epoch in order to infer the total number",
"the minimum learning rate via min_lr = initial_lr / final_div_factor.",
"used along with epochs in order to infer the total",
"learning rate of all param groups or each group respectively.",
"in order to infer the total number of steps in",
"patience=patience, verbose=verbose, min_lr=min_lr ) def one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch,",
"from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR def step_lr(optimizer, step_size, gamma=0.1,",
"(float): Factor for changing learning rate. (default: 0.1) last_epoch (int):",
"rate will be will be reduced. (default: 10) verbose (bool,",
"along with epochs in order to infer the total number",
"/ final_div_factor. (default: 1e4) Returns: OneCycleLR instance. \"\"\" return OneCycleLR(",
"\"\"\" return OneCycleLR( optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch, pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor",
"def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR step scheduler. Args:",
"Model optimizer. factor (float, optional): Factor by which the learning",
"optimizer. step_size (int): Frequency for changing learning rate. gamma (float):",
"torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1):",
"A lower bound on the learning rate of all param",
"Policy for Learning Rate. Args: optimizer (torch.optim): Model optimizer. max_lr",
"StepLR, ReduceLROnPlateau, OneCycleLR def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR",
"total number of steps in the cycle. steps_per_epoch (int): The",
"min_lr (float, optional): A scalar or a list of scalars.",
"of epochs to train for. This is used along with",
"of steps) spent increasing the learning rate. (default: 0.5) div_factor",
"Determines the initial learning rate via initial_lr = max_lr /",
"0) Returns: ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau( optimizer, factor=factor, patience=patience,",
"/ div_factor. (default: 10.0) final_div_factor (float, optional): Determines the minimum",
") def one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000",
"improvement after which learning rate will be will be reduced.",
"final_div_factor=10000 ): \"\"\"Create One Cycle Policy for Learning Rate. Args:",
"0.5) div_factor (float, optional): Determines the initial learning rate via",
"one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create",
"(float, optional): Factor by which the learning rate will be",
"by which the learning rate will be reduced. (default: 0.1)",
"gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create LR",
"def one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ):",
"rate will be reduced. (default: 0.1) patience (int, optional): Number",
"after which learning rate will be will be reduced. (default:",
"Model optimizer. max_lr (float): Upper learning rate boundary in the",
"cycle. steps_per_epoch (int): The number of steps per epoch to",
"total number of steps in the cycle. pct_start (float, optional):",
"spent increasing the learning rate. (default: 0.5) div_factor (float, optional):",
"Model optimizer. step_size (int): Frequency for changing learning rate. gamma",
"(default: 10) verbose (bool, optional): If True, prints a message",
"(int): The number of steps per epoch to train for.",
"scheduler. Args: optimizer (torch.optim): Model optimizer. factor (float, optional): Factor",
"The number of steps per epoch to train for. This",
"with epochs in order to infer the total number of",
"10.0) final_div_factor (float, optional): Determines the minimum learning rate via",
"(bool, optional): If True, prints a message to stdout for",
"step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR step scheduler. Args: optimizer",
"for Learning Rate. Args: optimizer (torch.optim): Model optimizer. max_lr (float):",
"Learning rate scheduler. \"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def",
"def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create LR plateau reduction",
"reduction scheduler. Args: optimizer (torch.optim): Model optimizer. factor (float, optional):",
"scheduler. Args: optimizer (torch.optim): Model optimizer. step_size (int): Frequency for",
"learning rate. (default: 0.1) last_epoch (int): The index of last",
"step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create",
"a list of scalars. A lower bound on the learning",
"list of scalars. A lower bound on the learning rate",
"): \"\"\"Create One Cycle Policy for Learning Rate. Args: optimizer",
"If True, prints a message to stdout for each update.",
"(float): Upper learning rate boundary in the cycle. epochs (int):",
"train for. This is used along with epochs in order",
"each group respectively. (default: 0) Returns: ReduceLROnPlateau instance. \"\"\" return",
"LR plateau reduction scheduler. Args: optimizer (torch.optim): Model optimizer. factor",
"ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr ) def one_cycle_lr( optimizer,",
"lower bound on the learning rate of all param groups",
"max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One Cycle",
"(torch.optim): Model optimizer. step_size (int): Frequency for changing learning rate.",
"with no improvement after which learning rate will be will",
"Cycle Policy for Learning Rate. Args: optimizer (torch.optim): Model optimizer.",
"\"\"\"Create LR plateau reduction scheduler. Args: optimizer (torch.optim): Model optimizer.",
"instance. \"\"\" return OneCycleLR( optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch, pct_start=pct_start, div_factor=div_factor,",
"(default: -1) Returns: StepLR: Learning rate scheduler. \"\"\" return StepLR(optimizer,",
"of last epoch. (default: -1) Returns: StepLR: Learning rate scheduler.",
"update. (default: False) min_lr (float, optional): A scalar or a",
"number of steps per epoch to train for. This is",
"div_factor. (default: 10.0) final_div_factor (float, optional): Determines the minimum learning",
"rate scheduler. \"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer,",
"all param groups or each group respectively. (default: 0) Returns:",
"used along with steps_per_epoch in order to infer the total",
"epoch to train for. This is used along with epochs",
"Args: optimizer (torch.optim): Model optimizer. factor (float, optional): Factor by",
"Rate. Args: optimizer (torch.optim): Model optimizer. max_lr (float): Upper learning",
"steps in the cycle. pct_start (float, optional): The percentage of",
"no improvement after which learning rate will be will be",
"optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One",
"Returns: StepLR: Learning rate scheduler. \"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma,",
"epoch with no improvement after which learning rate will be",
"Factor for changing learning rate. (default: 0.1) last_epoch (int): The",
"\"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10,",
"which the learning rate will be reduced. (default: 0.1) patience",
"pct_start (float, optional): The percentage of the cycle (in number",
"epochs to train for. This is used along with steps_per_epoch",
"optional): Factor by which the learning rate will be reduced.",
"for each update. (default: False) min_lr (float, optional): A scalar",
"instance. \"\"\" return ReduceLROnPlateau( optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr )",
"the total number of steps in the cycle. steps_per_epoch (int):",
"cycle (in number of steps) spent increasing the learning rate.",
"final_div_factor. (default: 1e4) Returns: OneCycleLR instance. \"\"\" return OneCycleLR( optimizer,",
"group respectively. (default: 0) Returns: ReduceLROnPlateau instance. \"\"\" return ReduceLROnPlateau(",
"The number of epochs to train for. This is used",
"= max_lr / div_factor. (default: 10.0) final_div_factor (float, optional): Determines",
"LR step scheduler. Args: optimizer (torch.optim): Model optimizer. step_size (int):",
"is used along with steps_per_epoch in order to infer the",
"rate. (default: 0.5) div_factor (float, optional): Determines the initial learning",
"This is used along with steps_per_epoch in order to infer",
"of steps in the cycle. steps_per_epoch (int): The number of",
"Upper learning rate boundary in the cycle. epochs (int): The",
"(float, optional): Determines the minimum learning rate via min_lr =",
"(torch.optim): Model optimizer. max_lr (float): Upper learning rate boundary in",
"via min_lr = initial_lr / final_div_factor. (default: 1e4) Returns: OneCycleLR",
"will be reduced. (default: 10) verbose (bool, optional): If True,",
"groups or each group respectively. (default: 0) Returns: ReduceLROnPlateau instance.",
"of all param groups or each group respectively. (default: 0)",
"optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr ) def one_cycle_lr( optimizer, max_lr,",
"train for. This is used along with steps_per_epoch in order",
"rate via initial_lr = max_lr / div_factor. (default: 10.0) final_div_factor",
"(default: 0.5) div_factor (float, optional): Determines the initial learning rate",
"factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create LR plateau reduction scheduler. Args:",
"epochs (int): The number of epochs to train for. This",
"the cycle. pct_start (float, optional): The percentage of the cycle",
"each update. (default: False) min_lr (float, optional): A scalar or",
"import StepLR, ReduceLROnPlateau, OneCycleLR def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create",
"(float, optional): The percentage of the cycle (in number of",
"for changing learning rate. gamma (float): Factor for changing learning",
"= initial_lr / final_div_factor. (default: 1e4) Returns: OneCycleLR instance. \"\"\"",
"True, prints a message to stdout for each update. (default:",
"(int): Frequency for changing learning rate. gamma (float): Factor for",
"A scalar or a list of scalars. A lower bound",
"steps in the cycle. steps_per_epoch (int): The number of steps",
"step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR step scheduler. Args: optimizer (torch.optim):",
"ReduceLROnPlateau, OneCycleLR def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1): \"\"\"Create LR step",
"steps per epoch to train for. This is used along",
"initial_lr / final_div_factor. (default: 1e4) Returns: OneCycleLR instance. \"\"\" return",
"optimizer (torch.optim): Model optimizer. step_size (int): Frequency for changing learning",
"1e4) Returns: OneCycleLR instance. \"\"\" return OneCycleLR( optimizer, max_lr, epochs=epochs,",
"optimizer (torch.optim): Model optimizer. factor (float, optional): Factor by which",
"patience (int, optional): Number of epoch with no improvement after",
"verbose=verbose, min_lr=min_lr ) def one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5,",
"last_epoch (int): The index of last epoch. (default: -1) Returns:",
"boundary in the cycle. epochs (int): The number of epochs",
"the learning rate will be reduced. (default: 0.1) patience (int,",
"(float, optional): A scalar or a list of scalars. A",
"(float, optional): Determines the initial learning rate via initial_lr =",
"via initial_lr = max_lr / div_factor. (default: 10.0) final_div_factor (float,",
"to train for. This is used along with steps_per_epoch in",
"steps) spent increasing the learning rate. (default: 0.5) div_factor (float,",
"last epoch. (default: -1) Returns: StepLR: Learning rate scheduler. \"\"\"",
"return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False,",
"verbose=False, min_lr=0): \"\"\"Create LR plateau reduction scheduler. Args: optimizer (torch.optim):",
"of epoch with no improvement after which learning rate will",
"which learning rate will be will be reduced. (default: 10)",
"or each group respectively. (default: 0) Returns: ReduceLROnPlateau instance. \"\"\"",
"0.1) last_epoch (int): The index of last epoch. (default: -1)",
"learning rate via initial_lr = max_lr / div_factor. (default: 10.0)",
"10) verbose (bool, optional): If True, prints a message to",
"\"\"\"Create LR step scheduler. Args: optimizer (torch.optim): Model optimizer. step_size",
"the total number of steps in the cycle. pct_start (float,",
"min_lr=min_lr ) def one_cycle_lr( optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0,",
"This is used along with epochs in order to infer",
"the learning rate. (default: 0.5) div_factor (float, optional): Determines the",
"will be reduced. (default: 0.1) patience (int, optional): Number of",
"message to stdout for each update. (default: False) min_lr (float,",
"factor=factor, patience=patience, verbose=verbose, min_lr=min_lr ) def one_cycle_lr( optimizer, max_lr, epochs,",
"Args: optimizer (torch.optim): Model optimizer. max_lr (float): Upper learning rate",
"last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0): \"\"\"Create LR plateau",
"the cycle. epochs (int): The number of epochs to train",
"in the cycle. steps_per_epoch (int): The number of steps per",
"for changing learning rate. (default: 0.1) last_epoch (int): The index",
"Learning Rate. Args: optimizer (torch.optim): Model optimizer. max_lr (float): Upper",
"rate boundary in the cycle. epochs (int): The number of",
"in the cycle. pct_start (float, optional): The percentage of the",
"reduced. (default: 0.1) patience (int, optional): Number of epoch with",
"learning rate. (default: 0.5) div_factor (float, optional): Determines the initial",
"StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0):",
"percentage of the cycle (in number of steps) spent increasing",
"rate. gamma (float): Factor for changing learning rate. (default: 0.1)",
"optional): Determines the initial learning rate via initial_lr = max_lr",
"optimizer. max_lr (float): Upper learning rate boundary in the cycle.",
"will be will be reduced. (default: 10) verbose (bool, optional):",
"The percentage of the cycle (in number of steps) spent",
"cycle. epochs (int): The number of epochs to train for.",
"infer the total number of steps in the cycle. steps_per_epoch",
"(default: False) min_lr (float, optional): A scalar or a list",
"of scalars. A lower bound on the learning rate of",
"per epoch to train for. This is used along with",
"verbose (bool, optional): If True, prints a message to stdout",
"initial_lr = max_lr / div_factor. (default: 10.0) final_div_factor (float, optional):",
"(default: 0.1) last_epoch (int): The index of last epoch. (default:",
"max_lr / div_factor. (default: 10.0) final_div_factor (float, optional): Determines the",
"be reduced. (default: 0.1) patience (int, optional): Number of epoch",
"pct_start=0.5, div_factor=10.0, final_div_factor=10000 ): \"\"\"Create One Cycle Policy for Learning",
"StepLR: Learning rate scheduler. \"\"\" return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)",
"optimizer (torch.optim): Model optimizer. max_lr (float): Upper learning rate boundary",
"Determines the minimum learning rate via min_lr = initial_lr /",
"gamma=0.1, last_epoch=-1): \"\"\"Create LR step scheduler. Args: optimizer (torch.optim): Model",
"order to infer the total number of steps in the",
"infer the total number of steps in the cycle. pct_start",
"(default: 10.0) final_div_factor (float, optional): Determines the minimum learning rate",
"final_div_factor (float, optional): Determines the minimum learning rate via min_lr"
] |
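A minimal usage sketch for the factory helpers above. The toy model, optimizer settings, and loop sizes are illustrative assumptions, not taken from the fragments; the sketch only shows the detail that is easy to get wrong with the One Cycle Policy: the scheduler is stepped once per batch, so the loop must take exactly epochs * steps_per_epoch optimizer steps.

import torch
from torch import nn, optim

model = nn.Linear(10, 2)                           # toy model (assumption)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

epochs, steps_per_epoch = 2, 5
scheduler = one_cycle_lr(
    optimizer, max_lr=0.1, epochs=epochs, steps_per_epoch=steps_per_epoch, pct_start=0.3
)

for _ in range(epochs):
    for _ in range(steps_per_epoch):
        optimizer.zero_grad()
        loss = model(torch.randn(8, 10)).sum()     # dummy forward/backward pass
        loss.backward()
        optimizer.step()
        scheduler.step()                           # OneCycleLR steps every batch, not every epoch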
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Zones"""
import copy
import unittest

import armi
from armi import settings
from armi.reactor import assemblies
from armi.reactor import blueprints
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor import reactors
from armi.reactor import zones
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
from armi.utils import pathTools
from armi.settings.fwSettings import globalSettings

THIS_DIR = pathTools.armiAbsDirFromName(__name__)


class Zone_TestCase(unittest.TestCase):
    def setUp(self):
        bp = blueprints.Blueprints()
        geom = geometry.SystemLayoutInput()
        geom.symmetry = "third core periodic"
        r = reactors.Reactor(settings.getMasterCs(), bp)
        r.add(reactors.Core("Core", settings.getMasterCs(), geom))
        r.core.spatialGrid = grids.hexGridFromPitch(1.0)
        aList = []
        for ring in range(10):
            a = assemblies.HexAssembly("fuel")
            a.spatialLocator = r.core.spatialGrid[ring, 1, 0]
            a.parent = r.core
            aList.append(a)
        self.aList = aList

    def test_addAssemblyLocations(self):
        zone = zones.Zone("TestZone")
        zone.addAssemblyLocations(self.aList)
        for a in self.aList:
            self.assertIn(a.getLocation(), zone)
        self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList)

    def test_iteration(self):
        locs = [a.getLocation() for a in self.aList]
        zone = zones.Zone("TestZone")
        zone.addAssemblyLocations(self.aList)
        for aLoc in zone:
            self.assertIn(aLoc, locs)
        # loop twice to make sure it iterates nicely.
        for aLoc in zone:
            self.assertIn(aLoc, locs)

    def test_addRing(self):
        zone = zones.Zone("TestZone")
        zone.addRing(5)
        self.assertIn("A5003", zone)
        self.assertNotIn("A6002", zone)
        zone.addRing(6, 3, 9)
        self.assertIn("A6003", zone)
        self.assertIn("A6009", zone)
        self.assertNotIn("A6002", zone)
        self.assertNotIn("A6010", zone)


class Zones_InReactor(unittest.TestCase):
    def setUp(self):
        self.o, self.r = test_reactors.loadTestReactor()

    def test_buildRingZones(self):
        o, r = self.o, self.r
        cs = o.cs
        cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
        cs["ringZones"] = []
        zonez = zones.buildZones(r.core, cs)
        self.assertEqual(len(list(zonez)), 1)
        self.assertEqual(9, r.core.numRings)

        cs["ringZones"] = [5, 8]
        zonez = zones.buildZones(r.core, cs)
        self.assertEqual(len(list(zonez)), 2)
        zone = zonez["ring-1"]
        self.assertEqual(len(zone), (5 * (5 - 1) + 1))
        zone = zonez["ring-2"]
        # Note that the actual number of rings in the reactor model is 9. Even though we
        # asked for the last zone to go to 8, the zone engine should bump it out. Not
        # sure if this is behavior that we want to preserve, but at least it's being
        # tested properly now.
        self.assertEqual(len(zone), (9 * (9 - 1) + 1) - (5 * (5 - 1) + 1))

        cs["ringZones"] = [5, 7, 8]
        zonez = zones.buildZones(r.core, cs)
        self.assertEqual(len(list(zonez)), 3)
        zone = zonez["ring-3"]
        self.assertEqual(len(zone), 30)  # rings 8 and 9. See above comment

    def test_removeZone(self):
        o, r = self.o, self.r
        cs = o.cs
        cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
        cs["ringZones"] = [5, 8]

        # produce 2 zones, with the names ringzone0 and ringzone1
        daZones = zones.buildZones(r.core, cs)
        daZones.removeZone("ring-1")

        # The names list should only house the only other remaining zone now
        self.assertEqual(["ring-2"], daZones.names)

        # if indexed like a dict, the zones object should give a key error from the removed zone
        with self.assertRaises(KeyError):
            daZones["ring-1"]

        # Ensure we can still iterate through our zones object
        for name in daZones.names:
            aZone = daZones[name]

    def test_findZoneAssemblyIsIn(self):
        cs = self.o.cs
        cs["ringZones"] = [5, 7, 8]
        daZones = zones.buildZones(self.r.core, cs)
        for zone in daZones:
            a = self.r.core.getAssemblyWithStringLocation(zone.locList[0])
            aZone = daZones.findZoneAssemblyIsIn(a)
            self.assertEqual(aZone, zone)

        # let's test that we get None and a warning if the assembly does not exist in a zone
        a = self.r.core.getAssemblyWithStringLocation(
            daZones[daZones.names[0]].locList[0]
        )  # get assem from first zone
        daZones.removeZone(
            daZones.names[0]
        )  # remove a zone to ensure that our assem does not have a zone anymore
        self.assertEqual(daZones.findZoneAssemblyIsIn(a), None)


class Zones_InRZReactor(unittest.TestCase):
    def test_splitZones(self):
        # Test to make sure that we can split a zone containing control and fuel assemblies.
        # Also test that we can separate out assemblies with differing numbers of blocks.
        o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
        cs = o.cs
        cs["splitZones"] = False
        cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
        cs["ringZones"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        diverseZone = "ring-4"
        r.core.buildZones(cs)
        daZones = r.core.zones

        # let's make one of the assemblies have an extra block
        zoneLocations = daZones.getZoneLocations(diverseZone)
        originalAssemblies = r.core.getLocationContents(
            zoneLocations, assemblyLevel=True
        )
        fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0]
        newBlock = copy.deepcopy(fuel[-1])
        fuel.add(newBlock)

        # should contain a zone for every ring zone;
        # we only want one ring zone for this test, containing assemblies of different types.
        zoneTup = tuple(daZones.names)
        for zoneName in zoneTup:
            if zoneName != diverseZone:
                daZones.removeZone(zoneName)

        # this should split diverseZone into multiple zones by nodalization type.
        cs["splitZones"] = True
        zones.splitZones(r.core, cs, daZones)

        # test to make sure that we split the ring zone correctly
        self.assertEqual(len(daZones["ring-4-primary-control-5"]), 2)
        self.assertEqual(len(daZones["ring-4-middle-fuel-5"]), 3)
        self.assertEqual(len(daZones["ring-4-middle-fuel-6"]), 1)

    def test_createHotZones(self):
        # Test to make sure createHotZones identifies the highest p/f location in a zone.
        # Test to make sure createHotZones can remove the peak assembly from that zone
        # and place it in a new zone.
        # Test that the power in the old zone and the new zone is conserved.
        # Test that a hot zone can not be created from a single assembly zone.
        o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
        cs = o.cs
        cs["splitZones"] = False
        cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
        cs["ringZones"] = [9]  # build one giant zone
        r.core.buildZones(cs)
        daZones = r.core.zones

        originalassemblies = []
        originalPower = 0.0
        peakZonePFRatios = []

        # Create a single assembly zone to verify that it will not create a hot zone
        single = zones.Zone("single")
        daZones.add(single)
        aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation()
        single.append(aLoc)

        # Set power and flow.
        # Also gather channel peak P/F ratios, assemblies and power.
        for zone in daZones:
            powerToFlow = []
            zoneLocations = daZones.getZoneLocations(zone.name)
            assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True)
            power = 300.0
            flow = 300.0
            for a in assems:
                a.getFirstBlock().p.power = power
                assemblyPower = a.calcTotalParam("power")
                a[-1].p.THmassFlowRate = flow
                powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate)
                originalPower += assemblyPower
                originalassemblies.append(a)
                power += 1
                flow -= 1
            peakZonePFRatios.append(max(powerToFlow))

        daZones = zones.createHotZones(r.core, daZones)

        # Test that the hot zones have the peak P/F from the host channels
        i = 0
        for zone in daZones:
            if zone.hotZone:
                hotAssemLocation = daZones.getZoneLocations(zone.name)
                hotAssem = r.core.getLocationContents(
                    hotAssemLocation, assemblyLevel=True
                )[0]
                self.assertEqual(
                    peakZonePFRatios[i],
                    hotAssem.calcTotalParam("power") / hotAssem[-1].p.THmassFlowRate,
                )
                i += 1

        powerAfterHotZoning = 0.0
        assembliesAfterHotZoning = []

        # Check that power is conserved and that we did not lose any assemblies
        for zone in daZones:
            locs = daZones.getZoneLocations(zone.name)
            assems = r.core.getLocationContents(locs, assemblyLevel=True)
            for a in assems:
                assembliesAfterHotZoning.append(a)
                powerAfterHotZoning += a.calcTotalParam("power")
        self.assertEqual(powerAfterHotZoning, originalPower)
        self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))

        # check that the original zone with 1 channel has False for hotzone
        self.assertEqual(single.hotZone, False)

        # check that we have the correct number of hot and normal zones.
        hotCount = 0
        normalCount = 0
        for zone in daZones:
            if zone.hotZone:
                hotCount += 1
            else:
                normalCount += 1
        self.assertEqual(hotCount, 1)
        self.assertEqual(normalCount, 2)


if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Zones_InReactor.test_buildRingZones']
    unittest.main()
"zone single = zones.Zone(\"single\") daZones.add(single) aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation() single.append(aLoc) #",
"assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True) power = 300.0 flow = 300.0",
"to make sure createHotZones can remove the peak assembly from",
"zone anymore self.assertEqual(daZones.findZoneAssemblyIsIn(a), None) class Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): # Test",
"# # Unless required by applicable law or agreed to",
"if indexed like a dict, the zones object should give",
"from armi.reactor.flags import Flags from armi.reactor.tests import test_reactors from armi.utils",
"def setUp(self): bp = blueprints.Blueprints() geom = geometry.SystemLayoutInput() geom.symmetry =",
"zone.addAssemblyLocations, self.aList) def test_iteration(self): locs = [a.getLocation() for a in",
"test to make sure that we split the ring zone",
"rings 8 and 9. See above comment def test_removeZone(self): o,",
"and that we did not lose any assemblies for zone",
"7, 8, 9] diverseZone = \"ring-4\" r.core.buildZones(cs) daZones = r.core.zones",
"daZones = r.core.zones originalassemblies = [] originalPower = 0.0 peakZonePFRatios",
"[] zoneLocations = daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True) power =",
"a.spatialLocator = r.core.spatialGrid[ring, 1, 0] a.parent = r.core aList.append(a) self.aList",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"anymore self.assertEqual(daZones.findZoneAssemblyIsIn(a), None) class Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): # Test to",
"tested properly now. self.assertEqual(len(zone), (9 * (9 - 1) +",
"model is 9. Even though we # asked for the",
"if a hot zone can not be created from a",
"zones.Zone(\"TestZone\") zone.addAssemblyLocations(self.aList) for aLoc in zone: self.assertIn(aLoc, locs) # loop",
"Version 2.0 (the \"License\"); # you may not use this",
"= 0.0 assembliesAfterHotZoning = [] # Check that power is",
"the actual number of rings in the reactor model is",
"-= 1 peakZonePFRatios.append(max(powerToFlow)) daZones = zones.createHotZones(r.core, daZones) # Test that",
"[9] # build one giant zone r.core.buildZones(cs) daZones = r.core.zones",
"# limitations under the License. \"\"\"Test for Zones\"\"\" import copy",
"daZones.names[0] ) # remove a zone to ensure that our",
"1) def test_createHotZones(self): # Test to make sure createHotZones identifies",
"[] for ring in range(10): a = assemblies.HexAssembly(\"fuel\") a.spatialLocator =",
"= 300.0 for a in assems: a.getFirstBlock().p.power = power assemblyPower",
"assemblies for zone in daZones: locs = daZones.getZoneLocations(zone.name) assems =",
"daZones.names: aZone = daZones[name] def test_findZoneAssemblyIsIn(self): cs = self.o.cs cs[\"ringZones\"]",
"for zone in daZones: if zone.hotZone: hotCount += 1 else:",
"# if indexed like a dict, the zones object should",
"in originalAssemblies if a.hasFlags(Flags.FUEL)][0] newBlock = copy.deepcopy(fuel[-1]) fuel.add(newBlock) # should",
"r.core.zones originalassemblies = [] originalPower = 0.0 peakZonePFRatios = []",
"Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): # Test to make sure that we",
"powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate) originalPower += assemblyPower originalassemblies.append(a) power += 1",
"zone) self.assertIn(\"A6009\", zone) self.assertNotIn(\"A6002\", zone) self.assertNotIn(\"A6010\", zone) class Zones_InReactor(unittest.TestCase): def",
"armi from armi import settings from armi.reactor import assemblies from",
"= [] # Check that power is conserved and that",
"implied. # See the License for the specific language governing",
"= [] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 1) self.assertEqual(9, r.core.numRings)",
"self.aList) def test_iteration(self): locs = [a.getLocation() for a in self.aList]",
"zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 3) zone = zonez[\"ring-3\"] self.assertEqual(len(zone),",
"if the assembly does not exist in a zone a",
"normalCount += 1 self.assertEqual(hotCount, 1) self.assertEqual(normalCount, 2) if __name__ ==",
"in assems: assembliesAfterHotZoning.append(a) powerAfterHotZoning += a.calcTotalParam(\"power\") self.assertEqual(powerAfterHotZoning, originalPower) self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"cs) self.assertEqual(len(list(zonez)), 1) self.assertEqual(9, r.core.numRings) cs[\"ringZones\"] = [5, 8] zonez",
"import pathTools from armi.settings.fwSettings import globalSettings THIS_DIR = pathTools.armiAbsDirFromName(__name__) class",
"that the actual number of rings in the reactor model",
"zone correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3) self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1) def test_createHotZones(self):",
"in zone: self.assertIn(aLoc, locs) def test_addRing(self): zone = zones.Zone(\"TestZone\") zone.addRing(5)",
"geometry from armi.reactor import grids from armi.reactor import reactors from",
"for zone in daZones: locs = daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(locs,",
"from that zone and place it in a new zone",
"self.r = test_reactors.loadTestReactor() def test_buildRingZones(self): o, r = self.o, self.r",
"o, r = self.o, self.r cs = o.cs cs[globalSettings.CONF_ZONING_STRATEGY] =",
"!= diverseZone: daZones.removeZone(zoneName) # this should split diverseZone into multiple",
"= r.core.spatialGrid[ring, 1, 0] a.parent = r.core aList.append(a) self.aList =",
"have the peak P/F from the host channels i =",
"= o.cs cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = [] zonez =",
"by applicable law or agreed to in writing, software #",
"correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3) self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1) def test_createHotZones(self): #",
"a zone to ensure that our assem does not have",
"zones.splitZones(r.core, cs, daZones) # test to make sure that we",
"object for name in daZones.names: aZone = daZones[name] def test_findZoneAssemblyIsIn(self):",
"for ring in range(10): a = assemblies.HexAssembly(\"fuel\") a.spatialLocator = r.core.spatialGrid[ring,",
"setUp(self): self.o, self.r = test_reactors.loadTestReactor() def test_buildRingZones(self): o, r =",
"= r.core.zones originalassemblies = [] originalPower = 0.0 peakZonePFRatios =",
"geom = geometry.SystemLayoutInput() geom.symmetry = \"third core periodic\" r =",
"for this test, containing assemblies of different types. zoneTup =",
"test_splitZones(self): # Test to make sure that we can split",
"a in originalAssemblies if a.hasFlags(Flags.FUEL)][0] newBlock = copy.deepcopy(fuel[-1]) fuel.add(newBlock) #",
"make one of the assemblies have an extra block zoneLocations",
"= grids.hexGridFromPitch(1.0) aList = [] for ring in range(10): a",
"assemblies with differing numbers of blocks. o, r = test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\")",
"cs[\"ringZones\"] = [5, 8] # produce 2 zones, with the",
"we split the ring zone correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3)",
"with 1 channel has False for hotzone self.assertEqual(single.hotZone, False) #",
"[1, 2, 3, 4, 5, 6, 7, 8, 9] diverseZone",
"cs, daZones) # test to make sure that we split",
"a dict, the zones object should give a key error",
"self.assertEqual( peakZonePFRatios[i], hotAssem.calcTotalParam(\"power\") / hotAssem[-1].p.THmassFlowRate, ) i += 1 powerAfterHotZoning",
"= test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\") cs = o.cs cs[\"splitZones\"] = False cs[globalSettings.CONF_ZONING_STRATEGY] =",
"power. for zone in daZones: powerToFlow = [] zoneLocations =",
"and # limitations under the License. \"\"\"Test for Zones\"\"\" import",
"cs = self.o.cs cs[\"ringZones\"] = [5, 7, 8] daZones =",
"fuel.add(newBlock) # should contain a zone for every ring zone",
"(5 * (5 - 1) + 1)) zone = zonez[\"ring-2\"]",
"permissions and # limitations under the License. \"\"\"Test for Zones\"\"\"",
"1 channel has False for hotzone self.assertEqual(single.hotZone, False) # check",
"of blocks. o, r = test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\") cs = o.cs cs[\"splitZones\"]",
"tuple(daZones.names) for zoneName in zoneTup: if zoneName != diverseZone: daZones.removeZone(zoneName)",
"in a zone # Test to make sure createHotZones can",
"check that the original zone with 1 channel has False",
"# test to make sure that we split the ring",
"= 300.0 flow = 300.0 for a in assems: a.getFirstBlock().p.power",
"in zone: self.assertIn(aLoc, locs) # loop twice to make sure",
"zone.hotZone: hotCount += 1 else: normalCount += 1 self.assertEqual(hotCount, 1)",
"import grids from armi.reactor import reactors from armi.reactor import zones",
"armi.settings.fwSettings import globalSettings THIS_DIR = pathTools.armiAbsDirFromName(__name__) class Zone_TestCase(unittest.TestCase): def setUp(self):",
"for aLoc in zone: self.assertIn(aLoc, locs) # loop twice to",
"in daZones.names: aZone = daZones[name] def test_findZoneAssemblyIsIn(self): cs = self.o.cs",
"daZones[daZones.names[0]].locList[0] ) # get assem from first zone daZones.removeZone( daZones.names[0]",
"8] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 2) zone = zonez[\"ring-1\"]",
"test_buildRingZones(self): o, r = self.o, self.r cs = o.cs cs[globalSettings.CONF_ZONING_STRATEGY]",
"language governing permissions and # limitations under the License. \"\"\"Test",
"this is behavior that we want to preserve, but at",
"assembly zone to verify that it will not create a",
"= daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(locs, assemblyLevel=True) for a in assems:",
"zone is conserved. # Test that if a hot zone",
"= r.core.getLocationContents(locs, assemblyLevel=True) for a in assems: assembliesAfterHotZoning.append(a) powerAfterHotZoning +=",
"9. Even though we # asked for the last zone",
"+= 1 else: normalCount += 1 self.assertEqual(hotCount, 1) self.assertEqual(normalCount, 2)",
"zoneLocations = daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True) power = 300.0",
"1) + 1)) cs[\"ringZones\"] = [5, 7, 8] zonez =",
"this should split diverseZone into multiple zones by nodalization type.",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"nicely. for aLoc in zone: self.assertIn(aLoc, locs) def test_addRing(self): zone",
"get assem from first zone daZones.removeZone( daZones.names[0] ) # remove",
"iterate through our zones object for name in daZones.names: aZone",
"Unless required by applicable law or agreed to in writing,",
"originalPower = 0.0 peakZonePFRatios = [] # Create a single",
"diverseZone into multiple zones by nodalization type. cs[\"splitZones\"] = True",
"= assemblies.HexAssembly(\"fuel\") a.spatialLocator = r.core.spatialGrid[ring, 1, 0] a.parent = r.core",
"above comment def test_removeZone(self): o, r = self.o, self.r cs",
"from armi.settings.fwSettings import globalSettings THIS_DIR = pathTools.armiAbsDirFromName(__name__) class Zone_TestCase(unittest.TestCase): def",
"pathTools.armiAbsDirFromName(__name__) class Zone_TestCase(unittest.TestCase): def setUp(self): bp = blueprints.Blueprints() geom =",
"sure that we can split a zone containing control and",
"create a hot zone single = zones.Zone(\"single\") daZones.add(single) aLoc =",
"# this should split diverseZone into multiple zones by nodalization",
"= zones.Zone(\"TestZone\") zone.addAssemblyLocations(self.aList) for a in self.aList: self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError,",
"rings in the reactor model is 9. Even though we",
"the specific language governing permissions and # limitations under the",
"# Set power and flow. # Also gather channel peak",
"self.o, self.r = test_reactors.loadTestReactor() def test_buildRingZones(self): o, r = self.o,",
"License. \"\"\"Test for Zones\"\"\" import copy import unittest import armi",
"a zone anymore self.assertEqual(daZones.findZoneAssemblyIsIn(a), None) class Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): #",
"we have the correct number of hot and normal zones.",
"list should only house the only other remaining zone now",
"it in a new zone # Test that the power",
"False cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = [9] # build one",
"1 peakZonePFRatios.append(max(powerToFlow)) daZones = zones.createHotZones(r.core, daZones) # Test that the",
"number of hot and normal zones. hotCount = 0 normalCount",
"applicable law or agreed to in writing, software # distributed",
"assembliesAfterHotZoning = [] # Check that power is conserved and",
"only house the only other remaining zone now self.assertEqual([\"ring-2\"], daZones.names)",
"normalCount = 0 for zone in daZones: if zone.hotZone: hotCount",
"by nodalization type. cs[\"splitZones\"] = True zones.splitZones(r.core, cs, daZones) #",
"not lose any assemblies for zone in daZones: locs =",
"zone = zonez[\"ring-1\"] self.assertEqual(len(zone), (5 * (5 - 1) +",
"test, containing assemblies of different types. zoneTup = tuple(daZones.names) for",
"originalPower += assemblyPower originalassemblies.append(a) power += 1 flow -= 1",
"1 else: normalCount += 1 self.assertEqual(hotCount, 1) self.assertEqual(normalCount, 2) if",
"if zoneName != diverseZone: daZones.removeZone(zoneName) # this should split diverseZone",
"containing assemblies of different types. zoneTup = tuple(daZones.names) for zoneName",
"that we can separate out assemblies with differing numbers of",
"zone) self.assertNotIn(\"A6002\", zone) zone.addRing(6, 3, 9) self.assertIn(\"A6003\", zone) self.assertIn(\"A6009\", zone)",
"self.o.cs cs[\"ringZones\"] = [5, 7, 8] daZones = zones.buildZones(self.r.core, cs)",
"we get a none and a warning if the assembly",
"zones.Zone(\"single\") daZones.add(single) aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation() single.append(aLoc) # Set power and",
"in writing, software # distributed under the License is distributed",
"# produce 2 zones, with the names ringzone0 and ringzone1",
"originalPower) self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies)) # check that the original zone with",
"Zones\"\"\" import copy import unittest import armi from armi import",
"2, 3, 4, 5, 6, 7, 8, 9] diverseZone =",
"self.o, self.r cs = o.cs cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] =",
"self.assertEqual([\"ring-2\"], daZones.names) # if indexed like a dict, the zones",
"= test_reactors.loadTestReactor() def test_buildRingZones(self): o, r = self.o, self.r cs",
"aZone = daZones.findZoneAssemblyIsIn(a) self.assertEqual(aZone, zone) # lets test if we",
"\"byRingZone\" cs[\"ringZones\"] = [5, 8] # produce 2 zones, with",
"# lets test if we get a none and a",
"2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3) self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1) def test_createHotZones(self): # Test to",
"import reactors from armi.reactor import zones from armi.reactor.flags import Flags",
"for zone in daZones: if zone.hotZone: hotAssemLocation = daZones.getZoneLocations(zone.name) hotAssem",
"1 powerAfterHotZoning = 0.0 assembliesAfterHotZoning = [] # Check that",
"= self.r.core.getAssemblyWithStringLocation(zone.locList[0]) aZone = daZones.findZoneAssemblyIsIn(a) self.assertEqual(aZone, zone) # lets test",
"test_reactors.loadTestReactor() def test_buildRingZones(self): o, r = self.o, self.r cs =",
"self.r cs = o.cs cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = []",
"= zonez[\"ring-1\"] self.assertEqual(len(zone), (5 * (5 - 1) + 1))",
"self.assertEqual(daZones.findZoneAssemblyIsIn(a), None) class Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): # Test to make",
"zone in daZones: a = self.r.core.getAssemblyWithStringLocation(zone.locList[0]) aZone = daZones.findZoneAssemblyIsIn(a) self.assertEqual(aZone,",
"first zone daZones.removeZone( daZones.names[0] ) # remove a zone to",
"0] a.parent = r.core aList.append(a) self.aList = aList def test_addAssemblyLocations(self):",
"self.assertEqual(len(list(zonez)), 1) self.assertEqual(9, r.core.numRings) cs[\"ringZones\"] = [5, 8] zonez =",
"[5, 7, 8] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 3) zone",
"the zones object should give a key error from the",
"# check that we have the correct number of hot",
"can separate out assemblies with differing numbers of blocks. o,",
"the new zone is conserved. # Test that if a",
"zones.buildZones(r.core, cs) daZones.removeZone(\"ring-1\") # The names list should only house",
"2 zones, with the names ringzone0 and ringzone1 daZones =",
"sure if this is behavior that we want to preserve,",
"channel peak P/F ratios, assemblies and power. for zone in",
"hotAssemLocation = daZones.getZoneLocations(zone.name) hotAssem = r.core.getLocationContents( hotAssemLocation, assemblyLevel=True )[0] self.assertEqual(",
"last zone to to to 8, the zone engine should",
"ratios, assemblies and power. for zone in daZones: powerToFlow =",
"power += 1 flow -= 1 peakZonePFRatios.append(max(powerToFlow)) daZones = zones.createHotZones(r.core,",
"a in self.aList: self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList) def test_iteration(self):",
"zone with 1 channel has False for hotzone self.assertEqual(single.hotZone, False)",
"though we # asked for the last zone to to",
"Ensure we can still iterate through our zones object for",
"assemblyPower originalassemblies.append(a) power += 1 flow -= 1 peakZonePFRatios.append(max(powerToFlow)) daZones",
"zone to to to 8, the zone engine should bump",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"does not have a zone anymore self.assertEqual(daZones.findZoneAssemblyIsIn(a), None) class Zones_InRZReactor(unittest.TestCase):",
"from armi import settings from armi.reactor import assemblies from armi.reactor",
"it's being # tested properly now. self.assertEqual(len(zone), (9 * (9",
"= daZones.getZoneLocations(diverseZone) originalAssemblies = r.core.getLocationContents( zoneLocations, assemblyLevel=True ) fuel =",
"License, Version 2.0 (the \"License\"); # you may not use",
"for zone in daZones: a = self.r.core.getAssemblyWithStringLocation(zone.locList[0]) aZone = daZones.findZoneAssemblyIsIn(a)",
"= o.cs cs[\"splitZones\"] = False cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] =",
"fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0] newBlock",
"# You may obtain a copy of the License at",
"= pathTools.armiAbsDirFromName(__name__) class Zone_TestCase(unittest.TestCase): def setUp(self): bp = blueprints.Blueprints() geom",
"zone = zones.Zone(\"TestZone\") zone.addAssemblyLocations(self.aList) for aLoc in zone: self.assertIn(aLoc, locs)",
"= self.o.cs cs[\"ringZones\"] = [5, 7, 8] daZones = zones.buildZones(self.r.core,",
"contain a zone for every ring zone # we only",
"r = test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\") cs = o.cs cs[\"splitZones\"] = False cs[globalSettings.CONF_ZONING_STRATEGY]",
"single.append(aLoc) # Set power and flow. # Also gather channel",
"aZone = daZones[name] def test_findZoneAssemblyIsIn(self): cs = self.o.cs cs[\"ringZones\"] =",
"Set power and flow. # Also gather channel peak P/F",
"and a warning if the assembly does not exist in",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"1) + 1) - (5 * (5 - 1) +",
"assembliesAfterHotZoning.append(a) powerAfterHotZoning += a.calcTotalParam(\"power\") self.assertEqual(powerAfterHotZoning, originalPower) self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies)) # check",
"import geometry from armi.reactor import grids from armi.reactor import reactors",
"test_removeZone(self): o, r = self.o, self.r cs = o.cs cs[globalSettings.CONF_ZONING_STRATEGY]",
"can still iterate through our zones object for name in",
"= r.core.getFirstAssembly(Flags.FUEL).getLocation() single.append(aLoc) # Set power and flow. # Also",
"blueprints.Blueprints() geom = geometry.SystemLayoutInput() geom.symmetry = \"third core periodic\" r",
"\"byRingZone\" cs[\"ringZones\"] = [] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 1)",
"diverseZone: daZones.removeZone(zoneName) # this should split diverseZone into multiple zones",
"cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = [5, 8] # produce 2",
"daZones = r.core.zones # lets make one of the assemblies",
"self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList) def test_iteration(self): locs = [a.getLocation() for a",
"settings from armi.reactor import assemblies from armi.reactor import blueprints from",
"o.cs cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = [5, 8] # produce",
") # remove a zone to ensure that our assem",
"that our assem does not have a zone anymore self.assertEqual(daZones.findZoneAssemblyIsIn(a),",
"the ring zone correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3) self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1)",
"self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]), 3) self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1) def test_createHotZones(self): # Test to make",
"zone) self.assertNotIn(\"A6002\", zone) self.assertNotIn(\"A6010\", zone) class Zones_InReactor(unittest.TestCase): def setUp(self): self.o,",
"the License for the specific language governing permissions and #",
"of rings in the reactor model is 9. Even though",
"the original zone with 1 channel has False for hotzone",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"test_createHotZones(self): # Test to make sure createHotZones identifies the highest",
"that the hot zones have the peak P/F from the",
"= 0 for zone in daZones: if zone.hotZone: hotCount +=",
"[5, 8] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 2) zone =",
"either express or implied. # See the License for the",
"self.assertEqual(len(zone), (5 * (5 - 1) + 1)) zone =",
"= daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True) power = 300.0 flow",
"= r.core.getLocationContents(zoneLocations, assemblyLevel=True) power = 300.0 flow = 300.0 for",
"a in assems: a.getFirstBlock().p.power = power assemblyPower = a.calcTotalParam(\"power\") a[-1].p.THmassFlowRate",
"= 0 for zone in daZones: if zone.hotZone: hotAssemLocation =",
"power and flow. # Also gather channel peak P/F ratios,",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"from first zone daZones.removeZone( daZones.names[0] ) # remove a zone",
"def test_createHotZones(self): # Test to make sure createHotZones identifies the",
"8] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 3) zone = zonez[\"ring-3\"]",
"8] daZones = zones.buildZones(self.r.core, cs) for zone in daZones: a",
"p/f location in a zone # Test to make sure",
"9. See above comment def test_removeZone(self): o, r = self.o,",
"aList = [] for ring in range(10): a = assemblies.HexAssembly(\"fuel\")",
"created from a single assembly zone. o, r = test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\")",
"from the host channels i = 0 for zone in",
"different types. zoneTup = tuple(daZones.names) for zoneName in zoneTup: if",
"armi.reactor import assemblies from armi.reactor import blueprints from armi.reactor import",
"preserve, but at least it's being # tested properly now.",
"1 flow -= 1 peakZonePFRatios.append(max(powerToFlow)) daZones = zones.createHotZones(r.core, daZones) #",
"= r.core.getLocationContents( hotAssemLocation, assemblyLevel=True )[0] self.assertEqual( peakZonePFRatios[i], hotAssem.calcTotalParam(\"power\") / hotAssem[-1].p.THmassFlowRate,",
"not create a hot zone single = zones.Zone(\"single\") daZones.add(single) aLoc",
"build one giant zone r.core.buildZones(cs) daZones = r.core.zones originalassemblies =",
"test if we get a none and a warning if",
"ring zone # we only want one ring zone for",
"def test_addRing(self): zone = zones.Zone(\"TestZone\") zone.addRing(5) self.assertIn(\"A5003\", zone) self.assertNotIn(\"A6002\", zone)",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"from armi.reactor import assemblies from armi.reactor import blueprints from armi.reactor",
"we want to preserve, but at least it's being #",
"= zones.buildZones(r.core, cs) daZones.removeZone(\"ring-1\") # The names list should only",
"r.core.numRings) cs[\"ringZones\"] = [5, 8] zonez = zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)),",
"the last zone to to to 8, the zone engine",
"originalAssemblies = r.core.getLocationContents( zoneLocations, assemblyLevel=True ) fuel = [a for",
"# we only want one ring zone for this test,",
"want one ring zone for this test, containing assemblies of",
"place it in a new zone # Test that the",
"host channels i = 0 for zone in daZones: if",
"self.r.core.getAssemblyWithStringLocation( daZones[daZones.names[0]].locList[0] ) # get assem from first zone daZones.removeZone(",
"exist in a zone a = self.r.core.getAssemblyWithStringLocation( daZones[daZones.names[0]].locList[0] ) #",
"for zone in daZones: powerToFlow = [] zoneLocations = daZones.getZoneLocations(zone.name)",
"300.0 for a in assems: a.getFirstBlock().p.power = power assemblyPower =",
"= tuple(daZones.names) for zoneName in zoneTup: if zoneName != diverseZone:",
"that the original zone with 1 channel has False for",
"if zone.hotZone: hotCount += 1 else: normalCount += 1 self.assertEqual(hotCount,",
"is behavior that we want to preserve, but at least",
"to preserve, but at least it's being # tested properly",
"4, 5, 6, 7, 8, 9] diverseZone = \"ring-4\" r.core.buildZones(cs)",
"2) zone = zonez[\"ring-1\"] self.assertEqual(len(zone), (5 * (5 - 1)",
"self.assertEqual(len(daZones[\"ring-4-middle-fuel-6\"]), 1) def test_createHotZones(self): # Test to make sure createHotZones",
"daZones.removeZone(\"ring-1\") # The names list should only house the only",
"# Test to make sure createHotZones identifies the highest p/f",
"P/F ratios, assemblies and power. for zone in daZones: powerToFlow",
"twice to make sure it iterates nicely. for aLoc in",
"zone in daZones: locs = daZones.getZoneLocations(zone.name) assems = r.core.getLocationContents(locs, assemblyLevel=True)",
"original zone with 1 channel has False for hotzone self.assertEqual(single.hotZone,",
"an extra block zoneLocations = daZones.getZoneLocations(diverseZone) originalAssemblies = r.core.getLocationContents( zoneLocations,",
"range(10): a = assemblies.HexAssembly(\"fuel\") a.spatialLocator = r.core.spatialGrid[ring, 1, 0] a.parent",
"* (5 - 1) + 1)) cs[\"ringZones\"] = [5, 7,",
"a zone # Test to make sure createHotZones can remove",
"i += 1 powerAfterHotZoning = 0.0 assembliesAfterHotZoning = [] #",
"make sure that we can split a zone containing control",
"daZones = zones.buildZones(self.r.core, cs) for zone in daZones: a =",
"self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList) def test_iteration(self): locs = [a.getLocation()",
"self.assertNotIn(\"A6010\", zone) class Zones_InReactor(unittest.TestCase): def setUp(self): self.o, self.r = test_reactors.loadTestReactor()",
"import assemblies from armi.reactor import blueprints from armi.reactor import geometry",
"zone in daZones: powerToFlow = [] zoneLocations = daZones.getZoneLocations(zone.name) assems",
"zone = zones.Zone(\"TestZone\") zone.addRing(5) self.assertIn(\"A5003\", zone) self.assertNotIn(\"A6002\", zone) zone.addRing(6, 3,",
"\"License\"); # you may not use this file except in",
"now. self.assertEqual(len(zone), (9 * (9 - 1) + 1) -",
"peakZonePFRatios[i], hotAssem.calcTotalParam(\"power\") / hotAssem[-1].p.THmassFlowRate, ) i += 1 powerAfterHotZoning =",
"properly now. self.assertEqual(len(zone), (9 * (9 - 1) + 1)",
"to make sure that we can split a zone containing",
"engine should bump it out. Not # sure if this",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"= [] for ring in range(10): a = assemblies.HexAssembly(\"fuel\") a.spatialLocator",
"[a.getLocation() for a in self.aList] zone = zones.Zone(\"TestZone\") zone.addAssemblyLocations(self.aList) for",
"test_findZoneAssemblyIsIn(self): cs = self.o.cs cs[\"ringZones\"] = [5, 7, 8] daZones",
"# Test that the power in the old zone and",
"sure that we split the ring zone correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2)",
"# Also test that we can separate out assemblies with",
"from armi.reactor.tests import test_reactors from armi.utils import pathTools from armi.settings.fwSettings",
"import globalSettings THIS_DIR = pathTools.armiAbsDirFromName(__name__) class Zone_TestCase(unittest.TestCase): def setUp(self): bp",
"should contain a zone for every ring zone # we",
"# check that the original zone with 1 channel has",
"armi.reactor import geometry from armi.reactor import grids from armi.reactor import",
"o.cs cs[globalSettings.CONF_ZONING_STRATEGY] = \"byRingZone\" cs[\"ringZones\"] = [] zonez = zones.buildZones(r.core,",
"zones.buildZones(r.core, cs) self.assertEqual(len(list(zonez)), 3) zone = zonez[\"ring-3\"] self.assertEqual(len(zone), 30) #",
"# distributed under the License is distributed on an \"AS",
"and fuel assemblies. # Also test that we can separate",
"be created from a single assembly zone. o, r =",
"power in the old zone and the new zone is",
"Check that power is conserved and that we did not",
"# Unless required by applicable law or agreed to in",
"zone to verify that it will not create a hot",
"import copy import unittest import armi from armi import settings",
"a none and a warning if the assembly does not",
"= [] # Create a single assembly zone to verify",
"it will not create a hot zone single = zones.Zone(\"single\")",
"self.aList: self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList) def test_iteration(self): locs =",
"single assembly zone. o, r = test_reactors.loadTestReactor(inputFileName=\"partisnTestReactor.yaml\") cs = o.cs",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"correct number of hot and normal zones. hotCount = 0",
"8, the zone engine should bump it out. Not #",
"test_iteration(self): locs = [a.getLocation() for a in self.aList] zone =",
"3, 9) self.assertIn(\"A6003\", zone) self.assertIn(\"A6009\", zone) self.assertNotIn(\"A6002\", zone) self.assertNotIn(\"A6010\", zone)",
"9] diverseZone = \"ring-4\" r.core.buildZones(cs) daZones = r.core.zones # lets",
"len(originalassemblies)) # check that the original zone with 1 channel",
"8 and 9. See above comment def test_removeZone(self): o, r",
"+ 1)) cs[\"ringZones\"] = [5, 7, 8] zonez = zones.buildZones(r.core,",
"r.core.spatialGrid[ring, 1, 0] a.parent = r.core aList.append(a) self.aList = aList",
"self.assertEqual(hotCount, 1) self.assertEqual(normalCount, 2) if __name__ == \"__main__\": # import",
"You may obtain a copy of the License at #",
"[] # Create a single assembly zone to verify that",
"zone = zonez[\"ring-3\"] self.assertEqual(len(zone), 30) # rings 8 and 9.",
"(5 * (5 - 1) + 1)) cs[\"ringZones\"] = [5,",
"power is conserved and that we did not lose any",
"remove a zone to ensure that our assem does not",
"zone: self.assertIn(aLoc, locs) # loop twice to make sure it",
"None) class Zones_InRZReactor(unittest.TestCase): def test_splitZones(self): # Test to make sure",
"that we split the ring zone correctly self.assertEqual(len(daZones[\"ring-4-primary-control-5\"]), 2) self.assertEqual(len(daZones[\"ring-4-middle-fuel-5\"]),",
"zones.Zone(\"TestZone\") zone.addAssemblyLocations(self.aList) for a in self.aList: self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError, zone.addAssemblyLocations,",
"for a in self.aList: self.assertIn(a.getLocation(), zone) self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList) def",
"+ 1)) zone = zonez[\"ring-2\"] # Note that the actual",
"split diverseZone into multiple zones by nodalization type. cs[\"splitZones\"] =",
"assemblyLevel=True ) fuel = [a for a in originalAssemblies if",
"dict, the zones object should give a key error from",
"produce 2 zones, with the names ringzone0 and ringzone1 daZones",
"zone can not be created from a single assembly zone.",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"ensure that our assem does not have a zone anymore",
"hotAssem[-1].p.THmassFlowRate, ) i += 1 powerAfterHotZoning = 0.0 assembliesAfterHotZoning =",
"zone) self.assertNotIn(\"A6010\", zone) class Zones_InReactor(unittest.TestCase): def setUp(self): self.o, self.r ="
] |
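A note on the magic numbers in `test_buildRingZones`: the `(n * (n - 1) + 1)` terms are, on my reading, the cumulative number of assembly positions through ring `n` of a 1/3-symmetric hex core (1 position in ring 1, then 2 more per ring). A quick sketch of that arithmetic, for illustration only; the helper name is mine and the 1/3-core assumption is not stated explicitly in the test:

def positionsThroughRing(n: int) -> int:
    """Cumulative assembly positions through ring n, assuming a 1/3-symmetric hex core."""
    # ring 1 holds 1 position; each later ring r adds 2 * (r - 1) positions,
    # which telescopes to n * (n - 1) + 1
    return 1 + sum(2 * (r - 1) for r in range(2, n + 1))

assert positionsThroughRing(5) == 5 * (5 - 1) + 1 == 21        # size asserted for "ring-1"
assert positionsThroughRing(9) - positionsThroughRing(5) == 52  # size asserted for "ring-2"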
# islam_fitz/survey/migrations/0005_auto_20210712_2132.py
# Generated by Django 3.1.12 on 2021-07-12 19:32

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('survey', '0004_lastpage_whatsapp_button'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='lastpage',
            name='whatsapp_button',
        ),
        migrations.AddField(
            model_name='lastpage',
            name='whatsapp_number',
            field=models.CharField(default=1, max_length=50),
            preserve_default=False,
        ),
    ]
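For context, `default=1, preserve_default=False` is the pattern Django emits when a non-nullable field is added to a table that may already have rows: the default is used once to backfill existing rows and is then dropped from the model state. The model that would regenerate this migration presumably looks like the sketch below; the field and model names come from the operations above, everything else is my assumption:

# survey/models.py (sketch, not the project's actual file)
from django.db import models

class LastPage(models.Model):
    # whatsapp_button was removed in 0005; whatsapp_number replaced it.
    # No default here: the 1 in the migration was only a one-off backfill.
    whatsapp_number = models.CharField(max_length=50)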
# Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py
import aspose.email.mapi.msg as msg
from aspose.email.mapi import MapiNote, NoteSaveFormat, NoteColor


def run():
    dataDir = "Data/"
    # ExStart: CreateAndSaveOutlookNote
    note3 = MapiNote()
    note3.subject = "Blue color note"
    note3.body = "This is a blue color note"
    note3.color = NoteColor.YELLOW
    note3.height = 500
    note3.width = 500
    note3.save(dataDir + "CreateAndSaveOutlookNote_out.msg", NoteSaveFormat.MSG)
    # ExEnd: CreateAndSaveOutlookNote


if __name__ == '__main__':
    run()
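One oddity worth flagging: the subject and body say "blue" while the color is set to `NoteColor.YELLOW`. If the enum also defines a `BLUE` member (an assumption on my part; only `YELLOW` appears in this example), a self-consistent variant would be:

# Sketch only: NoteColor.BLUE is assumed, not confirmed by the example above.
note_blue = MapiNote()
note_blue.subject = "Blue color note"
note_blue.body = "This is a blue color note"
note_blue.color = NoteColor.BLUE
note_blue.save("Data/" + "CreateAndSaveOutlookNote_blue_out.msg", NoteSaveFormat.MSG)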
import functools
from string import Formatter
from typing import (
    TYPE_CHECKING,
    Any,
    Set,
    Dict,
    List,
    Type,
    Tuple,
    Union,
    Generic,
    Mapping,
    TypeVar,
    Callable,
    Optional,
    Sequence,
    cast,
    overload,
)

if TYPE_CHECKING:
    from .message import Message, MessageSegment

TM = TypeVar("TM", bound="Message")
TF = TypeVar("TF", str, "Message")

FormatSpecFunc = Callable[[Any], str]
FormatSpecFunc_T = TypeVar("FormatSpecFunc_T", bound=FormatSpecFunc)


class MessageTemplate(Formatter, Generic[TF]):
    """Message template formatter.

    Args:
        template: the template
        factory: message type factory, defaults to `str`
    """

    @overload
    def __init__(
        self: "MessageTemplate[str]", template: str, factory: Type[str] = str
    ) -> None:
        ...

    @overload
    def __init__(
        self: "MessageTemplate[TM]", template: Union[str, TM], factory: Type[TM]
    ) -> None:
        ...

    def __init__(self, template, factory=str) -> None:
        self.template: TF = template
        self.factory: Type[TF] = factory
        self.format_specs: Dict[str, FormatSpecFunc] = {}

    def add_format_spec(
        self, spec: FormatSpecFunc_T, name: Optional[str] = None
    ) -> FormatSpecFunc_T:
        name = name or spec.__name__
        if name in self.format_specs:
            raise ValueError(f"Format spec {name} already exists!")
        self.format_specs[name] = spec
        return spec

    def format(self, *args, **kwargs):
        """Build a message object from the template and the given arguments."""
        return self._format(args, kwargs)

    def format_map(self, mapping: Mapping[str, Any]) -> TF:
        """Build a message object from the template and a mapping; useful
        when the field names are not valid identifiers."""
        return self._format([], mapping)

    def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
        msg = self.factory()
        if isinstance(self.template, str):
            msg += self.vformat(self.template, args, kwargs)
        elif isinstance(self.template, self.factory):
            template = cast("Message[MessageSegment]", self.template)
            for seg in template:
                msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
        else:
            raise TypeError("template must be a string or instance of Message!")

        return msg  # type:ignore

    def vformat(
        self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
    ) -> TF:
        used_args = set()
        result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
        self.check_unused_args(list(used_args), args, kwargs)
        return result

    def _vformat(
        self,
        format_string: str,
        args: Sequence[Any],
        kwargs: Mapping[str, Any],
        used_args: Set[Union[int, str]],
        recursion_depth: int,
        auto_arg_index: int = 0,
    ) -> Tuple[TF, int]:
        if recursion_depth < 0:
            raise ValueError("Max string recursion exceeded")

        results: List[Any] = [self.factory()]

        for (literal_text, field_name, format_spec, conversion) in self.parse(
            format_string
        ):
            # output the literal text
            if literal_text:
                results.append(literal_text)

            # if there's a field, output it
            if field_name is not None:
                # this is some markup, find the object and do
                # the formatting

                # handle arg indexing when empty field_names are given.
                if field_name == "":
                    if auto_arg_index is False:
                        raise ValueError(
                            "cannot switch from manual field specification to "
                            "automatic field numbering"
                        )
                    field_name = str(auto_arg_index)
                    auto_arg_index += 1
                elif field_name.isdigit():
                    if auto_arg_index:
                        raise ValueError(
                            "cannot switch from manual field specification to "
                            "automatic field numbering"
                        )
                    # disable auto arg incrementing, if it gets
                    # used later on, then an exception will be raised
                    auto_arg_index = False

                # given the field_name, find the object it references
                # and the argument it came from
                obj, arg_used = self.get_field(field_name, args, kwargs)
                used_args.add(arg_used)

                assert format_spec is not None

                # do any conversion on the resulting object
                obj = self.convert_field(obj, conversion) if conversion else obj

                # expand the format spec, if needed
                format_control, auto_arg_index = self._vformat(
                    format_spec,
                    args,
                    kwargs,
                    used_args,
                    recursion_depth - 1,
                    auto_arg_index,
                )

                # format the object and append to the result
                formatted_text = self.format_field(obj, str(format_control))
                results.append(formatted_text)

        return functools.reduce(self._add, results), auto_arg_index

    def format_field(self, value: Any, format_spec: str) -> Any:
        formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)
        if formatter is None and not issubclass(self.factory, str):
            segment_class: Type["MessageSegment"] = self.factory.get_segment_class()
            method = getattr(segment_class, format_spec, None)
            if callable(method) and not cast(str, method.__name__).startswith("_"):
                formatter = getattr(segment_class, format_spec)
        return (
            super().format_field(value, format_spec)
            if formatter is None
            else formatter(value)
        )

    def _add(self, a: Any, b: Any) -> Any:
        # NOTE: the source record is truncated at this signature; plain
        # concatenation is the minimal body that functools.reduce(self._add,
        # results) above relies on, so it is assumed here.
        return a + b
"isinstance(self.template, self.factory): template = cast(\"Message[MessageSegment]\", self.template) for seg in template:",
"getattr(segment_class, format_spec, None) if callable(method) and not cast(str, method.__name__).startswith(\"_\"): formatter",
"-> Any: formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec) if formatter is None",
"0, ) -> Tuple[TF, int]: if recursion_depth < 0: raise",
"later on, then an exception will be raised auto_arg_index =",
"# type:ignore def vformat( self, format_string: str, args: Sequence[Any], kwargs:",
"find the object it references # and the argument it",
"FormatSpecFunc = Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter,",
"TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return self._format([], mapping) def _format(self, args: Sequence[Any],",
"= getattr(segment_class, format_spec, None) if callable(method) and not cast(str, method.__name__).startswith(\"_\"):",
"# used later on, then an exception will be raised",
"must be a string or instance of Message!\") return msg",
"factory self.format_specs: Dict[str, FormatSpecFunc] = {} def add_format_spec( self, spec:",
") -> Tuple[TF, int]: if recursion_depth < 0: raise ValueError(\"Max",
"conversion on the resulting object obj = self.convert_field(obj, conversion) if",
"results.append(literal_text) # if there's a field, output it if field_name",
"string import Formatter from typing import ( TYPE_CHECKING, Any, Set,",
"TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template: 模板 factory:",
"used_args: Set[Union[int, str]], recursion_depth: int, auto_arg_index: int = 0, )",
"the object and append to the result formatted_text = self.format_field(obj,",
"self: \"MessageTemplate[str]\", template: str, factory: Type[str] = str ) ->",
"= name or spec.__name__ if name in self.format_specs: raise ValueError(f\"Format",
"_vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any], used_args:",
"FormatSpecFunc_T, name: Optional[str] = None ) -> FormatSpecFunc_T: name =",
"**kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args, kwargs) def format_map(self, mapping: Mapping[str, Any])",
"\"automatic field numbering\" ) # disable auto arg incrementing, if",
"gets # used later on, then an exception will be",
"(literal_text, field_name, format_spec, conversion) in self.parse( format_string ): # output",
".message import Message, MessageSegment TM = TypeVar(\"TM\", bound=\"Message\") TF =",
"field_name, format_spec, conversion) in self.parse( format_string ): # output the",
"from manual field specification to \" \"automatic field numbering\" )",
"exists!\") self.format_specs[name] = spec return spec def format(self, *args, **kwargs):",
"= self._vformat( format_spec, args, kwargs, used_args, recursion_depth - 1, auto_arg_index,",
"the object it references # and the argument it came",
"Sequence[Any], kwargs: Mapping[str, Any] ) -> TF: used_args = set()",
"= Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]):",
"handle arg indexing when empty field_names are given. if field_name",
"obj, arg_used = self.get_field(field_name, args, kwargs) used_args.add(arg_used) assert format_spec is",
"used_args.add(arg_used) assert format_spec is not None # do any conversion",
"... def __init__(self, template, factory=str) -> None: self.template: TF =",
"used_args, 2) self.check_unused_args(list(used_args), args, kwargs) return result def _vformat( self,",
"name = name or spec.__name__ if name in self.format_specs: raise",
"-> TF: used_args = set() result, _ = self._vformat(format_string, args,",
"def _add(self, a: Any, b: Any) -> Any: try: return",
"str, args: Sequence[Any], kwargs: Mapping[str, Any] ) -> TF: used_args",
"auto arg incrementing, if it gets # used later on,",
"return msg # type:ignore def vformat( self, format_string: str, args:",
"TypeVar, Callable, Optional, Sequence, cast, overload, ) if TYPE_CHECKING: from",
"used_args, recursion_depth - 1, auto_arg_index, ) # format the object",
"this is some markup, find the object and do #",
"numbering\" ) # disable auto arg incrementing, if it gets",
"typing import ( TYPE_CHECKING, Any, Set, Dict, List, Type, Tuple,",
"Mapping[str, Any] ) -> TF: used_args = set() result, _",
"False: raise ValueError( \"cannot switch from manual field specification to",
"object and append to the result formatted_text = self.format_field(obj, str(format_control))",
"conversion) in self.parse( format_string ): # output the literal text",
"field_name == \"\": if auto_arg_index is False: raise ValueError( \"cannot",
"formatter(value) ) def _add(self, a: Any, b: Any) -> Any:",
"auto_arg_index: int = 0, ) -> Tuple[TF, int]: if recursion_depth",
"msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg else:",
"( super().format_field(value, format_spec) if formatter is None else formatter(value) )",
"empty field_names are given. if field_name == \"\": if auto_arg_index",
"string or instance of Message!\") return msg # type:ignore def",
"formatted_text = self.format_field(obj, str(format_control)) results.append(formatted_text) return functools.reduce(self._add, results), auto_arg_index def",
"self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any], used_args: Set[Union[int,",
"to \" \"automatic field numbering\" ) field_name = str(auto_arg_index) auto_arg_index",
"spec {name} already exists!\") self.format_specs[name] = spec return spec def",
"= self.convert_field(obj, conversion) if conversion else obj # expand the",
"= {} def add_format_spec( self, spec: FormatSpecFunc_T, name: Optional[str] =",
"when empty field_names are given. if field_name == \"\": if",
"if field_name == \"\": if auto_arg_index is False: raise ValueError(",
"TF = template self.factory: Type[TF] = factory self.format_specs: Dict[str, FormatSpecFunc]",
"name: Optional[str] = None ) -> FormatSpecFunc_T: name = name",
"self.vformat(self.template, args, kwargs) elif isinstance(self.template, self.factory): template = cast(\"Message[MessageSegment]\", self.template)",
"# disable auto arg incrementing, if it gets # used",
"came from obj, arg_used = self.get_field(field_name, args, kwargs) used_args.add(arg_used) assert",
"obj = self.convert_field(obj, conversion) if conversion else obj # expand",
"find the object and do # the formatting # handle",
"# the formatting # handle arg indexing when empty field_names",
"and not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method =",
"_format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF: msg =",
"# if there's a field, output it if field_name is",
"not cast(str, method.__name__).startswith(\"_\"): formatter = getattr(segment_class, format_spec) return ( super().format_field(value,",
"manual field specification to \" \"automatic field numbering\" ) field_name",
"Any] ) -> TF: used_args = set() result, _ =",
"the field_name, find the object it references # and the",
"\"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM] ) -> None: ...",
"value: Any, format_spec: str) -> Any: formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)",
"\"\"\"消息模板格式化实现类。 参数: template: 模板 factory: 消息类型工厂,默认为 `str` \"\"\" @overload def",
"raise ValueError( \"cannot switch from manual field specification to \"",
"auto_arg_index: raise ValueError( \"cannot switch from manual field specification to",
"import Message, MessageSegment TM = TypeVar(\"TM\", bound=\"Message\") TF = TypeVar(\"TF\",",
"return result def _vformat( self, format_string: str, args: Sequence[Any], kwargs:",
"return self._format([], mapping) def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any])",
"-> None: self.template: TF = template self.factory: Type[TF] = factory",
"assert format_spec is not None # do any conversion on",
"output the literal text if literal_text: results.append(literal_text) # if there's",
"-> TF: msg = self.factory() if isinstance(self.template, str): msg +=",
"is some markup, find the object and do # the",
"= spec return spec def format(self, *args, **kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return",
"the result formatted_text = self.format_field(obj, str(format_control)) results.append(formatted_text) return functools.reduce(self._add, results),",
"factory: 消息类型工厂,默认为 `str` \"\"\" @overload def __init__( self: \"MessageTemplate[str]\", template:",
"str, factory: Type[str] = str ) -> None: ... @overload",
"it came from obj, arg_used = self.get_field(field_name, args, kwargs) used_args.add(arg_used)",
"to \" \"automatic field numbering\" ) # disable auto arg",
"\"automatic field numbering\" ) field_name = str(auto_arg_index) auto_arg_index += 1",
"Callable, Optional, Sequence, cast, overload, ) if TYPE_CHECKING: from .message",
"on the resulting object obj = self.convert_field(obj, conversion) if conversion",
"auto_arg_index, ) # format the object and append to the",
"Tuple, Union, Generic, Mapping, TypeVar, Callable, Optional, Sequence, cast, overload,",
"MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template: 模板 factory: 消息类型工厂,默认为 `str` \"\"\"",
"None: self.template: TF = template self.factory: Type[TF] = factory self.format_specs:",
"instance of Message!\") return msg # type:ignore def vformat( self,",
"functools from string import Formatter from typing import ( TYPE_CHECKING,",
"from .message import Message, MessageSegment TM = TypeVar(\"TM\", bound=\"Message\") TF",
"# output the literal text if literal_text: results.append(literal_text) # if",
"\"MessageTemplate[str]\", template: str, factory: Type[str] = str ) -> None:",
"self.format_specs.get(format_spec) if formatter is None and not issubclass(self.factory, str): segment_class:",
"spec: FormatSpecFunc_T, name: Optional[str] = None ) -> FormatSpecFunc_T: name",
"Sequence[Any], kwargs: Mapping[str, Any]) -> TF: msg = self.factory() if",
"factory: Type[str] = str ) -> None: ... @overload def",
"# format the object and append to the result formatted_text",
"args, kwargs) elif isinstance(self.template, self.factory): template = cast(\"Message[MessageSegment]\", self.template) for",
"\"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return self._format([], mapping) def _format(self, args: Sequence[Any], kwargs:",
"Set[Union[int, str]], recursion_depth: int, auto_arg_index: int = 0, ) ->",
"is None else formatter(value) ) def _add(self, a: Any, b:",
"for (literal_text, field_name, format_spec, conversion) in self.parse( format_string ): #",
") -> None: ... @overload def __init__( self: \"MessageTemplate[TM]\", template:",
"Any, b: Any) -> Any: try: return a + b",
"Optional[FormatSpecFunc] = self.format_specs.get(format_spec) if formatter is None and not issubclass(self.factory,",
"def add_format_spec( self, spec: FormatSpecFunc_T, name: Optional[str] = None )",
"None and not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method",
"self._vformat(format_string, args, kwargs, used_args, 2) self.check_unused_args(list(used_args), args, kwargs) return result",
"str]], recursion_depth: int, auto_arg_index: int = 0, ) -> Tuple[TF,",
"raise ValueError(f\"Format spec {name} already exists!\") self.format_specs[name] = spec return",
"= TypeVar(\"TM\", bound=\"Message\") TF = TypeVar(\"TF\", str, \"Message\") FormatSpecFunc =",
"field specification to \" \"automatic field numbering\" ) # disable",
"*args, **kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args, kwargs) def format_map(self, mapping: Mapping[str,",
"recursion_depth - 1, auto_arg_index, ) # format the object and",
"it gets # used later on, then an exception will",
"class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template: 模板 factory: 消息类型工厂,默认为 `str`",
"return spec def format(self, *args, **kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args, kwargs)",
"format_spec) return ( super().format_field(value, format_spec) if formatter is None else",
"bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template: 模板 factory: 消息类型工厂,默认为",
"TYPE_CHECKING, Any, Set, Dict, List, Type, Tuple, Union, Generic, Mapping,",
"\"\"\" @overload def __init__( self: \"MessageTemplate[str]\", template: str, factory: Type[str]",
"spec.__name__ if name in self.format_specs: raise ValueError(f\"Format spec {name} already",
"Union[str, TM], factory: Type[TM] ) -> None: ... def __init__(self,",
"output it if field_name is not None: # this is",
"+= self.vformat(self.template, args, kwargs) elif isinstance(self.template, self.factory): template = cast(\"Message[MessageSegment]\",",
"for seg in template: msg += self.vformat(str(seg), args, kwargs) if",
"str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method = getattr(segment_class, format_spec, None)",
"not None # do any conversion on the resulting object",
") field_name = str(auto_arg_index) auto_arg_index += 1 elif field_name.isdigit(): if",
"\" \"automatic field numbering\" ) # disable auto arg incrementing,",
"str): msg += self.vformat(self.template, args, kwargs) elif isinstance(self.template, self.factory): template",
"not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method = getattr(segment_class,",
"= getattr(segment_class, format_spec) return ( super().format_field(value, format_spec) if formatter is",
"b: Any) -> Any: try: return a + b except",
"\"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args, kwargs) def format_map(self, mapping: Mapping[str, Any]) ->",
"ValueError(f\"Format spec {name} already exists!\") self.format_specs[name] = spec return spec",
"spec return spec def format(self, *args, **kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args,",
"else: raise TypeError(\"template must be a string or instance of",
"field_name.isdigit(): if auto_arg_index: raise ValueError( \"cannot switch from manual field",
"def format_field(self, value: Any, format_spec: str) -> Any: formatter: Optional[FormatSpecFunc]",
"field_name is not None: # this is some markup, find",
"# and the argument it came from obj, arg_used =",
"msg = self.factory() if isinstance(self.template, str): msg += self.vformat(self.template, args,",
"_add(self, a: Any, b: Any) -> Any: try: return a",
"field_name = str(auto_arg_index) auto_arg_index += 1 elif field_name.isdigit(): if auto_arg_index:",
"Type[TM] ) -> None: ... def __init__(self, template, factory=str) ->",
"from typing import ( TYPE_CHECKING, Any, Set, Dict, List, Type,",
"str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数:",
"format(self, *args, **kwargs): \"\"\"根据传入参数和模板生成消息对象\"\"\" return self._format(args, kwargs) def format_map(self, mapping:",
"@overload def __init__( self: \"MessageTemplate[str]\", template: str, factory: Type[str] =",
"argument it came from obj, arg_used = self.get_field(field_name, args, kwargs)",
"-> Any: try: return a + b except TypeError: return",
"self.template: TF = template self.factory: Type[TF] = factory self.format_specs: Dict[str,",
"+= 1 elif field_name.isdigit(): if auto_arg_index: raise ValueError( \"cannot switch",
"args, kwargs) return result def _vformat( self, format_string: str, args:",
"= TypeVar(\"TF\", str, \"Message\") FormatSpecFunc = Callable[[Any], str] FormatSpecFunc_T =",
"self, spec: FormatSpecFunc_T, name: Optional[str] = None ) -> FormatSpecFunc_T:",
"else obj # expand the format spec, if needed format_control,",
"overload, ) if TYPE_CHECKING: from .message import Message, MessageSegment TM",
"field_names are given. if field_name == \"\": if auto_arg_index is",
"then an exception will be raised auto_arg_index = False #",
"the object and do # the formatting # handle arg",
"int = 0, ) -> Tuple[TF, int]: if recursion_depth <",
"if callable(method) and not cast(str, method.__name__).startswith(\"_\"): formatter = getattr(segment_class, format_spec)",
"if formatter is None else formatter(value) ) def _add(self, a:",
"resulting object obj = self.convert_field(obj, conversion) if conversion else obj",
"= template self.factory: Type[TF] = factory self.format_specs: Dict[str, FormatSpecFunc] =",
"there's a field, output it if field_name is not None:",
"Type[TF] = factory self.format_specs: Dict[str, FormatSpecFunc] = {} def add_format_spec(",
"kwargs) used_args.add(arg_used) assert format_spec is not None # do any",
"format_spec, conversion) in self.parse( format_string ): # output the literal",
"format_map(self, mapping: Mapping[str, Any]) -> TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return self._format([],",
"self.factory): template = cast(\"Message[MessageSegment]\", self.template) for seg in template: msg",
"def _vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any],",
"already exists!\") self.format_specs[name] = spec return spec def format(self, *args,",
"template: msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg",
"add_format_spec( self, spec: FormatSpecFunc_T, name: Optional[str] = None ) ->",
"set() result, _ = self._vformat(format_string, args, kwargs, used_args, 2) self.check_unused_args(list(used_args),",
"self.factory.get_segment_class() method = getattr(segment_class, format_spec, None) if callable(method) and not",
"): # output the literal text if literal_text: results.append(literal_text) #",
"Type, Tuple, Union, Generic, Mapping, TypeVar, Callable, Optional, Sequence, cast,",
"Mapping[str, Any], used_args: Set[Union[int, str]], recursion_depth: int, auto_arg_index: int =",
"Any: try: return a + b except TypeError: return a",
"getattr(segment_class, format_spec) return ( super().format_field(value, format_spec) if formatter is None",
"from string import Formatter from typing import ( TYPE_CHECKING, Any,",
"-> TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return self._format([], mapping) def _format(self, args:",
"args, kwargs, used_args, recursion_depth - 1, auto_arg_index, ) # format",
"args: Sequence[Any], kwargs: Mapping[str, Any], used_args: Set[Union[int, str]], recursion_depth: int,",
"a: Any, b: Any) -> Any: try: return a +",
"= str ) -> None: ... @overload def __init__( self:",
"template = cast(\"Message[MessageSegment]\", self.template) for seg in template: msg +=",
"self._format(args, kwargs) def format_map(self, mapping: Mapping[str, Any]) -> TF: \"\"\"根据传入字典和模板生成消息对象,",
"numbering\" ) field_name = str(auto_arg_index) auto_arg_index += 1 elif field_name.isdigit():",
"format the object and append to the result formatted_text =",
"Any) -> Any: try: return a + b except TypeError:",
"消息类型工厂,默认为 `str` \"\"\" @overload def __init__( self: \"MessageTemplate[str]\", template: str,",
"type:ignore def vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str,",
"auto_arg_index = self._vformat( format_spec, args, kwargs, used_args, recursion_depth - 1,",
") -> TF: used_args = set() result, _ = self._vformat(format_string,",
"obj # expand the format spec, if needed format_control, auto_arg_index",
"conversion else obj # expand the format spec, if needed",
"in self.format_specs: raise ValueError(f\"Format spec {name} already exists!\") self.format_specs[name] =",
") # disable auto arg incrementing, if it gets #",
"# given the field_name, find the object it references #",
"specification to \" \"automatic field numbering\" ) # disable auto",
"factory: Type[TM] ) -> None: ... def __init__(self, template, factory=str)",
"args, kwargs) used_args.add(arg_used) assert format_spec is not None # do",
"Formatter from typing import ( TYPE_CHECKING, Any, Set, Dict, List,",
"field numbering\" ) field_name = str(auto_arg_index) auto_arg_index += 1 elif",
"None) if callable(method) and not cast(str, method.__name__).startswith(\"_\"): formatter = getattr(segment_class,",
"is None and not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class()",
"Any: formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec) if formatter is None and",
"auto_arg_index def format_field(self, value: Any, format_spec: str) -> Any: formatter:",
"are given. if field_name == \"\": if auto_arg_index is False:",
"msg += self.vformat(self.template, args, kwargs) elif isinstance(self.template, self.factory): template =",
"Dict[str, FormatSpecFunc] = {} def add_format_spec( self, spec: FormatSpecFunc_T, name:",
"if formatter is None and not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"]",
"self._vformat( format_spec, args, kwargs, used_args, recursion_depth - 1, auto_arg_index, )",
"return a + b except TypeError: return a + str(b)",
"str, \"Message\") FormatSpecFunc = Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc)",
"result def _vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str,",
"object and do # the formatting # handle arg indexing",
"1, auto_arg_index, ) # format the object and append to",
"the resulting object obj = self.convert_field(obj, conversion) if conversion else",
"the format spec, if needed format_control, auto_arg_index = self._vformat( format_spec,",
"format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any] ) -> TF:",
"literal text if literal_text: results.append(literal_text) # if there's a field,",
"- 1, auto_arg_index, ) # format the object and append",
"Mapping[str, Any]) -> TF: msg = self.factory() if isinstance(self.template, str):",
"result formatted_text = self.format_field(obj, str(format_control)) results.append(formatted_text) return functools.reduce(self._add, results), auto_arg_index",
"Message, MessageSegment TM = TypeVar(\"TM\", bound=\"Message\") TF = TypeVar(\"TF\", str,",
"TM = TypeVar(\"TM\", bound=\"Message\") TF = TypeVar(\"TF\", str, \"Message\") FormatSpecFunc",
"the argument it came from obj, arg_used = self.get_field(field_name, args,",
"format_control, auto_arg_index = self._vformat( format_spec, args, kwargs, used_args, recursion_depth -",
"def format_map(self, mapping: Mapping[str, Any]) -> TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return",
"False # given the field_name, find the object it references",
"ValueError(\"Max string recursion exceeded\") results: List[Any] = [self.factory()] for (literal_text,",
"\"\": if auto_arg_index is False: raise ValueError( \"cannot switch from",
"template: 模板 factory: 消息类型工厂,默认为 `str` \"\"\" @overload def __init__( self:",
") def _add(self, a: Any, b: Any) -> Any: try:",
") -> None: ... def __init__(self, template, factory=str) -> None:",
"raise TypeError(\"template must be a string or instance of Message!\")",
"formatter is None else formatter(value) ) def _add(self, a: Any,",
"... @overload def __init__( self: \"MessageTemplate[TM]\", template: Union[str, TM], factory:",
"manual field specification to \" \"automatic field numbering\" ) #",
"bound=\"Message\") TF = TypeVar(\"TF\", str, \"Message\") FormatSpecFunc = Callable[[Any], str]",
"auto_arg_index = False # given the field_name, find the object",
"the literal text if literal_text: results.append(literal_text) # if there's a",
"try: return a + b except TypeError: return a +",
"self.format_field(obj, str(format_control)) results.append(formatted_text) return functools.reduce(self._add, results), auto_arg_index def format_field(self, value:",
"< 0: raise ValueError(\"Max string recursion exceeded\") results: List[Any] =",
"if it gets # used later on, then an exception",
"template self.factory: Type[TF] = factory self.format_specs: Dict[str, FormatSpecFunc] = {}",
"any conversion on the resulting object obj = self.convert_field(obj, conversion)",
"template: Union[str, TM], factory: Type[TM] ) -> None: ... def",
"in template: msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else",
"== \"\": if auto_arg_index is False: raise ValueError( \"cannot switch",
"= str(auto_arg_index) auto_arg_index += 1 elif field_name.isdigit(): if auto_arg_index: raise",
"= None ) -> FormatSpecFunc_T: name = name or spec.__name__",
"self.check_unused_args(list(used_args), args, kwargs) return result def _vformat( self, format_string: str,",
"ValueError( \"cannot switch from manual field specification to \" \"automatic",
"= set() result, _ = self._vformat(format_string, args, kwargs, used_args, 2)",
"1 elif field_name.isdigit(): if auto_arg_index: raise ValueError( \"cannot switch from",
"__init__( self: \"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM] ) ->",
"be a string or instance of Message!\") return msg #",
"= False # given the field_name, find the object it",
"specification to \" \"automatic field numbering\" ) field_name = str(auto_arg_index)",
"def __init__( self: \"MessageTemplate[str]\", template: str, factory: Type[str] = str",
"or spec.__name__ if name in self.format_specs: raise ValueError(f\"Format spec {name}",
"TypeVar(\"TF\", str, \"Message\") FormatSpecFunc = Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\",",
"= cast(\"Message[MessageSegment]\", self.template) for seg in template: msg += self.vformat(str(seg),",
"TF: msg = self.factory() if isinstance(self.template, str): msg += self.vformat(self.template,",
"seg else: raise TypeError(\"template must be a string or instance",
"self.format_specs: Dict[str, FormatSpecFunc] = {} def add_format_spec( self, spec: FormatSpecFunc_T,",
"references # and the argument it came from obj, arg_used",
"( TYPE_CHECKING, Any, Set, Dict, List, Type, Tuple, Union, Generic,",
"\"Message\") FormatSpecFunc = Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class",
"issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method = getattr(segment_class, format_spec,",
"object obj = self.convert_field(obj, conversion) if conversion else obj #",
"arg indexing when empty field_names are given. if field_name ==",
"indexing when empty field_names are given. if field_name == \"\":",
"seg.is_text() else seg else: raise TypeError(\"template must be a string",
"if field_name is not None: # this is some markup,",
"\" \"automatic field numbering\" ) field_name = str(auto_arg_index) auto_arg_index +=",
"kwargs) def format_map(self, mapping: Mapping[str, Any]) -> TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\"",
"vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any] )",
"args, kwargs, used_args, 2) self.check_unused_args(list(used_args), args, kwargs) return result def",
"# expand the format spec, if needed format_control, auto_arg_index =",
"results.append(formatted_text) return functools.reduce(self._add, results), auto_arg_index def format_field(self, value: Any, format_spec:",
"results: List[Any] = [self.factory()] for (literal_text, field_name, format_spec, conversion) in",
"FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template:",
"segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class() method = getattr(segment_class, format_spec, None) if",
"self.factory() if isinstance(self.template, str): msg += self.vformat(self.template, args, kwargs) elif",
"+= self.vformat(str(seg), args, kwargs) if seg.is_text() else seg else: raise",
"self.vformat(str(seg), args, kwargs) if seg.is_text() else seg else: raise TypeError(\"template",
"self: \"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM] ) -> None:",
"switch from manual field specification to \" \"automatic field numbering\"",
"def vformat( self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]",
"kwargs) if seg.is_text() else seg else: raise TypeError(\"template must be",
"def __init__( self: \"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM] )",
"template, factory=str) -> None: self.template: TF = template self.factory: Type[TF]",
"disable auto arg incrementing, if it gets # used later",
"msg # type:ignore def vformat( self, format_string: str, args: Sequence[Any],",
"if name in self.format_specs: raise ValueError(f\"Format spec {name} already exists!\")",
"模板 factory: 消息类型工厂,默认为 `str` \"\"\" @overload def __init__( self: \"MessageTemplate[str]\",",
"recursion_depth < 0: raise ValueError(\"Max string recursion exceeded\") results: List[Any]",
"format_field(self, value: Any, format_spec: str) -> Any: formatter: Optional[FormatSpecFunc] =",
"used later on, then an exception will be raised auto_arg_index",
"not None: # this is some markup, find the object",
"None else formatter(value) ) def _add(self, a: Any, b: Any)",
"Mapping[str, Any]) -> TF: \"\"\"根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用\"\"\" return self._format([], mapping) def",
"of Message!\") return msg # type:ignore def vformat( self, format_string:",
"and append to the result formatted_text = self.format_field(obj, str(format_control)) results.append(formatted_text)",
"it references # and the argument it came from obj,",
"auto_arg_index is False: raise ValueError( \"cannot switch from manual field",
"FormatSpecFunc_T: name = name or spec.__name__ if name in self.format_specs:",
"_ = self._vformat(format_string, args, kwargs, used_args, 2) self.check_unused_args(list(used_args), args, kwargs)",
"TypeError(\"template must be a string or instance of Message!\") return",
"args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF: msg = self.factory()",
") # format the object and append to the result",
"self._format([], mapping) def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) ->",
"= TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。 参数: template: 模板",
"formatter is None and not issubclass(self.factory, str): segment_class: Type[\"MessageSegment\"] =",
"args: Sequence[Any], kwargs: Mapping[str, Any] ) -> TF: used_args =",
"__init__(self, template, factory=str) -> None: self.template: TF = template self.factory:",
"format_spec, None) if callable(method) and not cast(str, method.__name__).startswith(\"_\"): formatter =",
"Callable[[Any], str] FormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc) class MessageTemplate(Formatter, Generic[TF]): \"\"\"消息模板格式化实现类。",
"def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF: msg",
"Message!\") return msg # type:ignore def vformat( self, format_string: str,",
"return ( super().format_field(value, format_spec) if formatter is None else formatter(value)",
"None: ... def __init__(self, template, factory=str) -> None: self.template: TF"
] |
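A minimal usage sketch for the MessageTemplate class shown above, assuming the default str factory; the custom "upper" spec, the template string, and the argument value are illustrative, not taken from the original source:

template = MessageTemplate("Hello, {name:upper}!")

@template.add_format_spec
def upper(value):
    # custom format spec, registered under the function name "upper"
    return str(value).upper()

print(template.format(name="world"))  # -> Hello, WORLD!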
[
"-*- def main(): s, t, u = map(str, input().split()) if",
"def main(): s, t, u = map(str, input().split()) if len(s)",
"-*- coding: utf-8 -*- def main(): s, t, u =",
"t, u = map(str, input().split()) if len(s) == 5 and",
"len(t) == 7 and len(u) == 5: print('valid') else: print('invalid')",
"<gh_stars>1-10 # -*- coding: utf-8 -*- def main(): s, t,",
"s, t, u = map(str, input().split()) if len(s) == 5",
"map(str, input().split()) if len(s) == 5 and len(t) == 7",
"== 7 and len(u) == 5: print('valid') else: print('invalid') if",
"len(u) == 5: print('valid') else: print('invalid') if __name__ == '__main__':",
"main(): s, t, u = map(str, input().split()) if len(s) ==",
"= map(str, input().split()) if len(s) == 5 and len(t) ==",
"7 and len(u) == 5: print('valid') else: print('invalid') if __name__",
"5 and len(t) == 7 and len(u) == 5: print('valid')",
"coding: utf-8 -*- def main(): s, t, u = map(str,",
"and len(t) == 7 and len(u) == 5: print('valid') else:",
"utf-8 -*- def main(): s, t, u = map(str, input().split())",
"== 5 and len(t) == 7 and len(u) == 5:",
"and len(u) == 5: print('valid') else: print('invalid') if __name__ ==",
"len(s) == 5 and len(t) == 7 and len(u) ==",
"== 5: print('valid') else: print('invalid') if __name__ == '__main__': main()",
"if len(s) == 5 and len(t) == 7 and len(u)",
"input().split()) if len(s) == 5 and len(t) == 7 and",
"u = map(str, input().split()) if len(s) == 5 and len(t)",
"# -*- coding: utf-8 -*- def main(): s, t, u"
] |
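For illustration, the same length check the script above performs, inlined on sample values (the sample values are hypothetical):

s, t, u = '12345', '1234567', '54321'
print('valid' if len(s) == 5 and len(t) == 7 and len(u) == 5 else 'invalid')  # -> valid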
[
"names = ['关羽', '张飞', '赵云', '马超', '黄忠'] courses = ['语文',",
"row, name in enumerate(names): for col, course in enumerate(courses): scores[row][col]",
"录入五个学生三门课程的成绩 scores = [[None] * len(courses) for _ in range(len(names))]",
"'张飞', '赵云', '马超', '黄忠'] courses = ['语文', '数学', '英语'] #",
"* len(courses) for _ in range(len(names))] for row, name in",
"in range(len(names))] for row, name in enumerate(names): for col, course",
"in enumerate(names): for col, course in enumerate(courses): scores[row][col] = float(input(f'请输入{name}的{course}的成绩:'))",
"= ['关羽', '张飞', '赵云', '马超', '黄忠'] courses = ['语文', '数学',",
"# 录入五个学生三门课程的成绩 scores = [[None] * len(courses) for _ in",
"\"\"\" 嵌套的列表的坑 \"\"\" names = ['关羽', '张飞', '赵云', '马超', '黄忠']",
"_ in range(len(names))] for row, name in enumerate(names): for col,",
"len(courses) for _ in range(len(names))] for row, name in enumerate(names):",
"range(len(names))] for row, name in enumerate(names): for col, course in",
"<filename>python_Project/Day_16-20/test_2.py \"\"\" 嵌套的列表的坑 \"\"\" names = ['关羽', '张飞', '赵云', '马超',",
"'英语'] # 录入五个学生三门课程的成绩 scores = [[None] * len(courses) for _",
"嵌套的列表的坑 \"\"\" names = ['关羽', '张飞', '赵云', '马超', '黄忠'] courses",
"[[None] * len(courses) for _ in range(len(names))] for row, name",
"'数学', '英语'] # 录入五个学生三门课程的成绩 scores = [[None] * len(courses) for",
"for row, name in enumerate(names): for col, course in enumerate(courses):",
"'赵云', '马超', '黄忠'] courses = ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩",
"\"\"\" names = ['关羽', '张飞', '赵云', '马超', '黄忠'] courses =",
"'马超', '黄忠'] courses = ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores",
"'黄忠'] courses = ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores =",
"enumerate(names): for col, course in enumerate(courses): scores[row][col] = float(input(f'请输入{name}的{course}的成绩:')) print(scores)",
"scores = [[None] * len(courses) for _ in range(len(names))] for",
"courses = ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores = [[None]",
"= ['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores = [[None] *",
"for _ in range(len(names))] for row, name in enumerate(names): for",
"['关羽', '张飞', '赵云', '马超', '黄忠'] courses = ['语文', '数学', '英语']",
"= [[None] * len(courses) for _ in range(len(names))] for row,",
"['语文', '数学', '英语'] # 录入五个学生三门课程的成绩 scores = [[None] * len(courses)",
"name in enumerate(names): for col, course in enumerate(courses): scores[row][col] ="
] |
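The snippet above only names "the pitfall of nested lists" in its docstring; as a hedged illustration, the trap usually meant is row aliasing when the table is built with list multiplication instead of a comprehension (the variable names below are illustrative):

rows, cols = 5, 3

bad = [[None] * cols] * rows                  # every row is the same inner list object
bad[0][0] = 100
print(bad[1][0])                              # 100 -- all rows appear to change

good = [[None] * cols for _ in range(rows)]   # independent inner lists, as in the snippet above
good[0][0] = 100
print(good[1][0])                             # None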
[
"if self.train_offset > len(self.train_list) - 1: self.train_offset = 0 np.random.shuffle(self.train_list)",
"+= list(pin) # print(phones) return phones self.text_to_vocab = text_to_vocab_func def",
"[i.strip() for i in data if i != ''] self.test_list",
"i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: continue if len(data) <",
"= max_input // self.chunk * self.chunk + self.chunk max_in_len =",
"txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate /",
"np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1) in_len = len(speech_feature) // (",
"reduce != 0: chunk_times += 1 in_len *= chunk_times py",
"[['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'],",
"max_in_len *= chunk_times input_length = np.clip(input_length, 0, max_in_len) speech_features =",
"TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') import",
"pins = [i[0] for i in pins] phones = []",
"phones_length = [] txts = [] max_input = 0 if",
"np import pypinyin import tensorflow as tf from augmentations.augments import",
"tf from augmentations.augments import Augmentation from utils.speech_featurizers import SpeechFeaturizer from",
"None]), ) def get_per_epoch_steps(self): return len(self.train_list) // self.batch def eval_per_epoch_steps(self):",
"= [] max_input = 0 batch = self.batch for i",
"logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array)))",
"import time class AM_DataLoader(): def __init__(self, config_dict, training=True): self.speech_config =",
"def eval_data_generator(self): sample = [] speech_features = [] input_length =",
"< 400: continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info(",
"= config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config",
"* self.chunk + self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming:",
"= self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate",
"!= 0: in_len += 1 chunk_times = self.chunk // reduce",
"only_chinese(self, word): txt = '' for ch in word: if",
"= tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]), padding='post', value=self.phone_featurizer.pad) txts",
"{} not all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt =",
"max_input = max_input // self.chunk * self.chunk + self.chunk max_in_len",
"[] input_length = [] phones = [] phones_length = []",
"= f.readlines() data = [i.strip() for i in data if",
"word: if '\\u4e00' <= ch <= '\\u9fff': txt += ch",
"= np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000)",
"x, input_length, phones, phones_length,txts def generator(self, train=True): while 1: s=time.time()",
"1 self.steps = 0 def return_data_types(self): return (tf.float32, tf.int32, tf.int32,",
"1: self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs += 1 else: line",
"{}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list, encoding='utf-8') as f: data =",
"self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i",
"> len(self.train_list) - 1: self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs +=",
"= np.array(phones_length, 'int32') return x, input_length, phones, phones_length,txts def generator(self,",
"max_input // self.chunk * self.chunk + self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features,",
"self.streaming: max_input = max_input // self.chunk * self.chunk + self.chunk",
"return len(self.train_list) // self.batch def eval_per_epoch_steps(self): return len(self.test_list) // self.batch",
"pass else: return n return True def generate(self, train=True): sample",
"[] phones = [] phones_length = [] txts = []",
"list {}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list, encoding='utf-8') as f: data",
"self.init_text_to_vocab() self.epochs = 1 self.steps = 0 def return_data_types(self): return",
"- 1: self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs += 1 else:",
"x.shape[0] == 0: logging.info('load data length zero,continue') continue yield x,",
"speech_features = [] input_length = [] phones = [] phones_length",
"wp, txt = line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: logging.info('{}",
"pin in pins: if pin in self.phone_featurizer.vocab_array: phones += [pin]",
"in_len += 1 chunk_times = self.chunk // reduce if self.chunk",
"train list {} test list {}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list,",
"self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮':",
"speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch:",
"self.chunk * self.chunk + self.chunk max_in_len = max_input // self.chunk",
"print(phones) return phones self.text_to_vocab = text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list']",
"return_data_types(self): return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self): return (",
"augmentations.augments import Augmentation from utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers import",
"len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) ==",
"random.sample(sample, self.batch // 4) for i in sample: wp, txt",
"phones_length = [] txts = [] max_input = 0 batch",
"phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]), padding='post', value=self.phone_featurizer.pad)",
"input_length = [] phones = [] phones_length = [] txts",
"!= ''] self.test_list = data self.train_offset = 0 self.test_offset =",
"'' for ch in word: if '\\u4e00' <= ch <=",
"= self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] max_input",
"in txt: if n in vocab_list: pass else: return n",
"= self.speech_featurizer.load_wav(wp) except: logging.info('{} load data failed,skip'.format(wp)) continue if len(data)",
"i in sample: wp, txt = i.strip().split('\\t') try: data =",
"self.augment.available(): sample = random.sample(sample, self.batch // 4) for i in",
"return x, input_length, phones, phones_length, txts def check_valid(self, txt, vocab_list):",
"[['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] })",
"> self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{} duration out of wav_max_duration({}),skip'.format(wp,",
"continue return txt def eval_data_generator(self): sample = [] speech_features =",
"self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature =",
"f: train_list = f.readlines() train_list = [i.strip() for i in",
"len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data = self.augment.process(data) if",
"max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in",
"400: continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{}",
"0: chunk_times += 1 max_in_len *= chunk_times input_length = np.clip(input_length,",
"i != ''] self.test_list = data self.test_offset = 0 def",
"( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]),",
"logging.info('{} load data failed,skip'.format(wp)) continue if len(data) < 400: continue",
"data cost time: {}'.format(e-s)) if x.shape[0] == 0: logging.info('load data",
"= pypinyin.pinyin(txt) pins = [i[0] for i in pins] phones",
"!= 0: chunk_times += 1 in_len *= chunk_times py =",
"np.array(phones_length, 'int32') return x, input_length, phones, phones_length,txts def generator(self, train=True):",
"txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break if train",
"batch: break if self.streaming: max_input = max_input // self.chunk *",
"< 400: logging.info('{} wav too short < 25ms,skip'.format(wp)) continue elif",
"e=time.time() logging.info('load data cost time: {}'.format(e-s)) if x.shape[0] == 0:",
"'调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传':",
"> self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data = self.augment.process(data) if self.speech_config['only_chinese']:",
"phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32')",
"SpeechFeaturizer from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s -",
"not all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array)",
"= 0 wp, txt = line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp)",
"{} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array)))",
"np.array(phones, 'int32') txts = np.array(txts, 'int32') input_length = np.array(input_length, 'int32')",
"- %(message)s') import time class AM_DataLoader(): def __init__(self, config_dict, training=True):",
"= np.array(speech_features, 'float32') phones = np.array(phones, 'int32') txts = np.array(txts,",
"= data self.test_offset = 0 def only_chinese(self, word): txt =",
"self.speech_config = config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config']",
"'调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'],",
"* 3 // 4 if self.augment.available() else self.batch else: batch",
"self.streaming: speech_feature = data / np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1)",
"= self.only_chinese(txt) if not self.streaming: speech_feature = data / np.abs(data).max()",
"np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones,",
"= max_input // self.chunk chunk_times = self.chunk // reduce if",
"= line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: logging.info('{} load data",
"TextFeaturizer(self.text_config) self.make_file_list( training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1",
"continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{} duration",
"phones_length.append(len(phone_feature)) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000)",
"= [] phones_length = [] txts = [] max_input =",
"<gh_stars>1-10 import logging import random import numpy as np import",
"input_length = np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i)",
"1: s=time.time() x, input_length, phones, phones_length,txts = self.generate(train) e=time.time() logging.info('load",
"wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not",
"True: logging.info(' {} txt phone {} not all in tokens,continue'.format(txt,",
"max_input = max_input // self.chunk * self.chunk + self.chunk speech_features",
"py = self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is not True: logging.info('",
"= [] txts = [] max_input = 0 if train:",
"phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt",
"ch in word: if '\\u4e00' <= ch <= '\\u9fff': txt",
"else: phones += list(pin) # print(phones) return phones self.text_to_vocab =",
"[] txts = [] max_input = 0 batch = self.batch",
"['àn']], '肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']],",
"text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature): logging.info('{} feature length",
"maxlen=max([len(i) for i in txts]), padding='post', value=self.text_featurizer.pad) x = np.array(speech_features,",
"text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with open(train_list,",
"''] self.test_list = data self.train_offset = 0 self.test_offset = 0",
"np.array(speech_features, 'float32') phones = np.array(phones, 'int32') txts = np.array(txts, 'int32')",
"+ self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce =",
"len(data) < 400: continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:",
"None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]),",
"len(self.test_list) // self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'],",
"length < phone length,continue'.format(wp)) continue max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature)",
"< phone length,continue'.format(wp)) continue max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len)",
"self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature): logging.info('{} feature length < phone",
"self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer =",
"out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt = self.only_chinese(txt)",
"[['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'],",
"self.streaming = self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch =",
"all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature",
"phone_feature = self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature):",
"logging.info( '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']:",
"data failed,skip'.format(wp)) continue if len(data) < 400: continue elif len(data)",
"self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\",
"max_in_len = max_input // self.chunk chunk_times = self.chunk // reduce",
"training: with open(train_list, encoding='utf-8') as f: train_list = f.readlines() train_list",
"tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array) is not True:",
"phones_length, txts def check_valid(self, txt, vocab_list): if len(txt) == 0:",
"* 10): line = self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset",
"* 10): if train: line = self.train_list[self.train_offset] self.train_offset += 1",
"train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as f: data = f.readlines()",
"import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')",
"in phones]), padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i",
"txt {} not all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt",
"x, input_length, phones, phones_length,txts = self.generate(train) e=time.time() logging.info('load data cost",
"training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with open(train_list, encoding='utf-8') as f:",
"self.chunk if len(speech_feature) % self.chunk != 0: in_len += 1",
"self.speech_config['wav_max_duration']: continue data = self.augment.process(data) if self.speech_config['only_chinese']: txt = self.only_chinese(txt)",
"if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *",
"continue max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature))",
"for i in range(batch * 10): line = self.test_list[self.test_offset] self.test_offset",
"txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones, 'int32') txts",
"numpy as np import pypinyin import tensorflow as tf from",
"in sample: wp, txt = i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp)",
"n in txt: if n in vocab_list: pass else: return",
"pypinyin import tensorflow as tf from augmentations.augments import Augmentation from",
"i in train_list if i != ''] self.train_list = train_list",
"txts = [] max_input = 0 batch = self.batch for",
"= [] phones = [] phones_length = [] txts =",
"self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce = self.speech_config['reduction_factor']",
"/ np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1) in_len = len(speech_feature) //",
"open(train_list, encoding='utf-8') as f: train_list = f.readlines() train_list = [i.strip()",
"['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传':",
"self.chunk chunk_times = self.chunk // reduce if self.chunk % reduce",
"-1) reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\",
"config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config =",
"np.random.shuffle(self.train_list) self.epochs += 1 else: line = self.test_list[self.test_offset] self.test_offset +=",
"if '\\u4e00' <= ch <= '\\u9fff': txt += ch else:",
"chunk_times = self.chunk // reduce if self.chunk % reduce !=",
"- 1: self.test_offset = 0 wp, txt = line.strip().split('\\t') try:",
"self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming = self.speech_config['streaming']",
"* \\ self.speech_config['stride_ms'] in_len = len(speech_feature) // self.chunk if len(speech_feature)",
"def check_valid(self, txt, vocab_list): if len(txt) == 0: return False",
"0 logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list))) else:",
"'外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']]",
"= 0 if train: batch = self.batch * 3 //",
"1: self.test_offset = 0 wp, txt = line.strip().split('\\t') try: data",
"self.augment.available() else self.batch else: batch = self.batch for i in",
"{} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array)))",
"[i[0] for i in pins] phones = [] for pin",
"logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list))) else: with",
"= 1 self.steps = 0 def return_data_types(self): return (tf.float32, tf.int32,",
"= [] txts = [] max_input = 0 batch =",
"import random import numpy as np import pypinyin import tensorflow",
"config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch",
"== batch: break if self.streaming: max_input = max_input // self.chunk",
"SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training) self.augment",
"if self.streaming: max_input = max_input // self.chunk * self.chunk +",
"cost time: {}'.format(e-s)) if x.shape[0] == 0: logging.info('load data length",
"1000) * self.speech_config['stride_ms']) else: speech_feature = data speech_feature = np.expand_dims(speech_feature,",
"self.train_offset += 1 if self.train_offset > len(self.train_list) - 1: self.train_offset",
"train: line = self.train_list[self.train_offset] self.train_offset += 1 if self.train_offset >",
"i in range(batch * 10): line = self.test_list[self.test_offset] self.test_offset +=",
"if len(data) < 400: logging.info('{} wav too short < 25ms,skip'.format(wp))",
"if in_len < len(phone_feature): logging.info('{} feature length < phone length,continue'.format(wp))",
"tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self): return ( tf.TensorShape([self.batch, None, 1]),",
"// self.batch def eval_per_epoch_steps(self): return len(self.test_list) // self.batch def init_text_to_vocab(self):",
"txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue",
"ch else: continue return txt def eval_data_generator(self): sample = []",
"as np import pypinyin import tensorflow as tf from augmentations.augments",
"phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate",
"(self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] max_input = max_input //",
"data self.train_offset = 0 self.test_offset = 0 logging.info('load train list",
"phones += [pin] else: phones += list(pin) # print(phones) return",
"[] for pin in pins: if pin in self.phone_featurizer.vocab_array: phones",
"self.chunk // reduce if self.chunk % reduce != 0: chunk_times",
"reduce != 0: chunk_times += 1 max_in_len *= chunk_times input_length",
"import tensorflow as tf from augmentations.augments import Augmentation from utils.speech_featurizers",
"def generate(self, train=True): sample = [] speech_features = [] input_length",
"txt def eval_data_generator(self): sample = [] speech_features = [] input_length",
"+ self.chunk max_in_len = max_input // self.chunk chunk_times = self.chunk",
"== 0: return False for n in txt: if n",
"len(txt) == 0: return False for n in txt: if",
"elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data = self.augment.process(data)",
"self.test_offset > len(self.test_list) - 1: self.test_offset = 0 wp, txt",
"in pins: if pin in self.phone_featurizer.vocab_array: phones += [pin] else:",
"for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad) x",
"train and self.augment.available(): sample = random.sample(sample, self.batch // 4) for",
"np.array(txts, 'int32') input_length = np.array(input_length, 'int32') phones_length = np.array(phones_length, 'int32')",
"if self.chunk % reduce != 0: chunk_times += 1 max_in_len",
"for pin in pins: if pin in self.phone_featurizer.vocab_array: phones +=",
"len(self.test_list))) else: with open(test_list, encoding='utf-8') as f: data = f.readlines()",
"'int32') phones_length = np.array(phones_length, 'int32') return x, input_length, phones, phones_length,",
"if train and self.augment.available(): sample = random.sample(sample, self.batch // 4)",
"self.batch for i in range(batch * 10): line = self.test_list[self.test_offset]",
"= self.augment.process(data) if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming:",
"= SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training)",
"'int32') input_length = np.array(input_length, 'int32') phones_length = np.array(phones_length, 'int32') return",
"= np.array(input_length, 'int32') phones_length = np.array(phones_length, 'int32') return x, input_length,",
"len(phone_feature): logging.info('{} feature length < phone length,continue'.format(wp)) continue max_input =",
"self.check_valid(py, self.phone_featurizer.vocab_array) is not True: logging.info(' {} txt phone {}",
"continue if self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {} txt",
"0 wp, txt = line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except:",
"(self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] in_len = len(speech_feature) //",
"input_length = np.array(input_length, 'int32') phones_length = np.array(phones_length, 'int32') return x,",
"if len(data) < 400: continue elif len(data) > self.speech_featurizer.sample_rate *",
"0 self.test_offset = 0 logging.info('load train list {} test list",
"class AM_DataLoader(): def __init__(self, config_dict, training=True): self.speech_config = config_dict['speech_config'] self.phone_config",
"for i in train_list if i != ''] self.train_list =",
"- %(levelname)s - %(message)s') import time class AM_DataLoader(): def __init__(self,",
"phones, phones_length,txts = self.generate(train) e=time.time() logging.info('load data cost time: {}'.format(e-s))",
"ch <= '\\u9fff': txt += ch else: continue return txt",
"i in pins] phones = [] for pin in pins:",
"logging.info('load data length zero,continue') continue yield x, input_length, phones, phones_length,txts",
"= i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: continue if len(data)",
"= config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk",
"tf.int32,tf.int32) def return_data_shape(self): return ( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]),",
"f.readlines() train_list = [i.strip() for i in train_list if i",
"'新传': [['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传':",
"in data if i != ''] self.test_list = data self.test_offset",
"{} not all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt,",
"> len(self.test_list) - 1: self.test_offset = 0 wp, txt =",
"self.chunk max_in_len = max_input // self.chunk chunk_times = self.chunk //",
"= data self.train_offset = 0 self.test_offset = 0 logging.info('load train",
"- %(name)s - %(levelname)s - %(message)s') import time class AM_DataLoader():",
"load data failed,skip'.format(wp)) continue if len(data) < 400: continue elif",
"= [] input_length = [] phones = [] phones_length =",
"= config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer =",
"[['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传':",
"self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs += 1 else: line =",
"speech_feature = data speech_feature = np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor']",
"i in txts]), padding='post', value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones",
"'float32') phones = np.array(phones, 'int32') txts = np.array(txts, 'int32') input_length",
"wp, txt = i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: continue",
"chunk_times += 1 max_in_len *= chunk_times input_length = np.clip(input_length, 0,",
"self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {} txt {} not",
"tensorflow as tf from augmentations.augments import Augmentation from utils.speech_featurizers import",
"else self.batch else: batch = self.batch for i in range(batch",
"i in range(batch * 10): if train: line = self.train_list[self.train_offset]",
"input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce = self.speech_config['reduction_factor'] *",
"init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']],",
"= 0 def only_chinese(self, word): txt = '' for ch",
"def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins = [i[0] for i",
"if self.chunk % reduce != 0: chunk_times += 1 in_len",
"utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s -",
"0 batch = self.batch for i in range(batch * 10):",
"return len(self.test_list) // self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小':",
"len(speech_feature) % self.chunk != 0: in_len += 1 chunk_times =",
"self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer",
"[['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'],",
"sample = random.sample(sample, self.batch // 4) for i in sample:",
"duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt =",
"padding='post', value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones, 'int32')",
"if self.augment.available() else self.batch else: batch = self.batch for i",
"def only_chinese(self, word): txt = '' for ch in word:",
"not all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt = list(txt)",
"if pin in self.phone_featurizer.vocab_array: phones += [pin] else: phones +=",
"if i != ''] self.test_list = data self.test_offset = 0",
"= '' for ch in word: if '\\u4e00' <= ch",
"self.batch else: batch = self.batch for i in range(batch *",
"-1) in_len = len(speech_feature) // ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate /",
"4) for i in sample: wp, txt = i.strip().split('\\t') try:",
"txt: if n in vocab_list: pass else: return n return",
"self.epochs = 1 self.steps = 0 def return_data_types(self): return (tf.float32,",
"data = self.speech_featurizer.load_wav(wp) except: logging.info('{} load data failed,skip'.format(wp)) continue if",
"else: return n return True def generate(self, train=True): sample =",
"eval_per_epoch_steps(self): return len(self.test_list) // self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']],",
"in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature =",
"= [] max_input = 0 if train: batch = self.batch",
"<= '\\u9fff': txt += ch else: continue return txt def",
"phones_length = np.array(phones_length, 'int32') return x, input_length, phones, phones_length, txts",
"['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']],",
"= len(speech_feature) // ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *",
"self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data = self.augment.process(data) if self.speech_config['only_chinese']: txt",
"speech_feature = np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate /",
"= max_input // self.chunk * self.chunk + self.chunk speech_features =",
"get_per_epoch_steps(self): return len(self.train_list) // self.batch def eval_per_epoch_steps(self): return len(self.test_list) //",
"= self.batch * 3 // 4 if self.augment.available() else self.batch",
"in vocab_list: pass else: return n return True def generate(self,",
"+= 1 chunk_times = self.chunk // reduce if self.chunk %",
"train_list = f.readlines() train_list = [i.strip() for i in train_list",
"data self.test_offset = 0 def only_chinese(self, word): txt = ''",
"encoding='utf-8') as f: data = f.readlines() data = [i.strip() for",
"self.text_featurizer.vocab_array) is not True: logging.info(' {} txt phone {} not",
"time class AM_DataLoader(): def __init__(self, config_dict, training=True): self.speech_config = config_dict['speech_config']",
"reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms']",
"from augmentations.augments import Augmentation from utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers",
"'\\u9fff': txt += ch else: continue return txt def eval_data_generator(self):",
"sample = [] speech_features = [] input_length = [] phones",
"s=time.time() x, input_length, phones, phones_length,txts = self.generate(train) e=time.time() logging.info('load data",
"len(speech_feature) // self.chunk if len(speech_feature) % self.chunk != 0: in_len",
"batch: break if train and self.augment.available(): sample = random.sample(sample, self.batch",
"* (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] max_input = max_input",
"len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{} duration out of",
"tf.int32, tf.int32,tf.int32) def return_data_shape(self): return ( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch,",
"batch = self.batch for i in range(batch * 10): line",
"'int32') return x, input_length, phones, phones_length, txts def check_valid(self, txt,",
"txt = self.only_chinese(txt) if not self.streaming: speech_feature = data /",
"in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones, 'int32')",
"self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] in_len =",
"{} not all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt =",
"logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') import time",
"self.train_list[self.train_offset] self.train_offset += 1 if self.train_offset > len(self.train_list) - 1:",
"/ 1000) * \\ self.speech_config['stride_ms'] in_len = len(speech_feature) // self.chunk",
"True: logging.info(' {} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt,",
"tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py)",
"i in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones,",
"__init__(self, config_dict, training=True): self.speech_config = config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config",
"not True: logging.info(' {} txt phone {} not all in",
"self.chunk % reduce != 0: chunk_times += 1 max_in_len *=",
"self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature",
"continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()]",
"= text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with",
"for i in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones =",
"= np.array(phones, 'int32') txts = np.array(txts, 'int32') input_length = np.array(input_length,",
"None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self): return len(self.train_list)",
"phones]), padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in",
"len(sample) == batch: break if self.streaming: max_input = max_input //",
"* (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] in_len = len(speech_feature)",
"self.augment.process(data) if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming: speech_feature",
"1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), )",
"try: data = self.speech_featurizer.load_wav(wp) except: logging.info('{} load data failed,skip'.format(wp)) continue",
"n in vocab_list: pass else: return n return True def",
"self.test_offset = 0 logging.info('load train list {} test list {}'.format(len(self.train_list),",
"data if i != ''] self.test_list = data self.train_offset =",
"Augmentation from utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO,",
"len(self.train_list) // self.batch def eval_per_epoch_steps(self): return len(self.test_list) // self.batch def",
"self.test_list = data self.train_offset = 0 self.test_offset = 0 logging.info('load",
"max_input = 0 batch = self.batch for i in range(batch",
"[] max_input = 0 batch = self.batch for i in",
"self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer",
"self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature): logging.info('{} feature",
"train: batch = self.batch * 3 // 4 if self.augment.available()",
"sample: wp, txt = i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except:",
"['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'],",
"% reduce != 0: chunk_times += 1 in_len *= chunk_times",
"self.train_offset > len(self.train_list) - 1: self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs",
"config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming =",
"0 if train: batch = self.batch * 3 // 4",
"[['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt): pins",
"while 1: s=time.time() x, input_length, phones, phones_length,txts = self.generate(train) e=time.time()",
"return phones self.text_to_vocab = text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list']",
"pins: if pin in self.phone_featurizer.vocab_array: phones += [pin] else: phones",
"sample.append(line) if len(sample) == batch: break if self.streaming: max_input =",
"eval_data_generator(self): sample = [] speech_features = [] input_length = []",
"if self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {} txt {}",
"if not self.streaming: speech_feature = data / np.abs(data).max() speech_feature =",
"1000) * \\ self.speech_config['stride_ms'] in_len = len(speech_feature) // self.chunk if",
"f.readlines() data = [i.strip() for i in data if i",
"= np.array(txts, 'int32') input_length = np.array(input_length, 'int32') phones_length = np.array(phones_length,",
"else: with open(test_list, encoding='utf-8') as f: data = f.readlines() data",
"phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break if train and",
"np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) *",
"= len(speech_feature) // self.chunk if len(speech_feature) % self.chunk != 0:",
"continue if len(data) < 400: logging.info('{} wav too short <",
"else: speech_feature = data speech_feature = np.expand_dims(speech_feature, -1) reduce =",
"random import numpy as np import pypinyin import tensorflow as",
"'正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt):",
"// self.chunk * self.chunk + self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)",
"{} test list {}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list, encoding='utf-8') as",
"0 def return_data_types(self): return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self):",
"def return_data_shape(self): return ( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch,",
"[i.strip() for i in train_list if i != ''] self.train_list",
"self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate']",
"True def generate(self, train=True): sample = [] speech_features = []",
"in_len = len(speech_feature) // self.chunk if len(speech_feature) % self.chunk !=",
"== 0: logging.info('load data length zero,continue') continue yield x, input_length,",
"generate(self, train=True): sample = [] speech_features = [] input_length =",
"return False for n in txt: if n in vocab_list:",
"from utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s",
"// reduce if self.chunk % reduce != 0: chunk_times +=",
"data = [i.strip() for i in data if i !=",
"self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info('",
"in_len = len(speech_feature) // ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000)",
"= self.chunk // reduce if self.chunk % reduce != 0:",
"// self.chunk * self.chunk + self.chunk max_in_len = max_input //",
"config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config)",
"// ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms']) else:",
"in_len < len(phone_feature): logging.info('{} feature length < phone length,continue'.format(wp)) continue",
"max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce",
"batch = self.batch * 3 // 4 if self.augment.available() else",
"self.text_to_vocab = text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training:",
"txt, vocab_list): if len(txt) == 0: return False for n",
"tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py)",
"range(batch * 10): line = self.test_list[self.test_offset] self.test_offset += 1 if",
"as tf from augmentations.augments import Augmentation from utils.speech_featurizers import SpeechFeaturizer",
"= np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones =",
"in range(batch * 10): if train: line = self.train_list[self.train_offset] self.train_offset",
"txts def check_valid(self, txt, vocab_list): if len(txt) == 0: return",
"tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch,",
"self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training) self.augment =",
"['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] }) def",
"( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms']) else: speech_feature",
"def return_data_types(self): return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self): return",
"n return True def generate(self, train=True): sample = [] speech_features",
"wav too short < 25ms,skip'.format(wp)) continue elif len(data) > self.speech_featurizer.sample_rate",
"encoding='utf-8') as f: train_list = f.readlines() train_list = [i.strip() for",
"logging import random import numpy as np import pypinyin import",
"np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i",
"for i in phones]), padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i)",
"= 0 def return_data_types(self): return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def",
"]), tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self): return len(self.train_list) // self.batch",
"data speech_feature = np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate",
"10): if train: line = self.train_list[self.train_offset] self.train_offset += 1 if",
"i != ''] self.train_list = train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8')",
"self.augment_config = config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] *",
"txt += ch else: continue return txt def eval_data_generator(self): sample",
"phones = [] phones_length = [] txts = [] max_input",
"len(self.train_list) - 1: self.train_offset = 0 np.random.shuffle(self.train_list) self.epochs += 1",
"pins] phones = [] for pin in pins: if pin",
"max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]), padding='post',",
"len(sample) == batch: break if train and self.augment.available(): sample =",
"self.chunk * self.chunk + self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if",
") def get_per_epoch_steps(self): return len(self.train_list) // self.batch def eval_per_epoch_steps(self): return",
"self.test_list = data self.test_offset = 0 def only_chinese(self, word): txt",
"else: line = self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset >",
"max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line)",
"check_valid(self, txt, vocab_list): if len(txt) == 0: return False for",
"self.chunk + self.chunk speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce",
"except: continue if len(data) < 400: logging.info('{} wav too short",
"data = f.readlines() data = [i.strip() for i in data",
"'int32') return x, input_length, phones, phones_length,txts def generator(self, train=True): while",
"if len(txt) == 0: return False for n in txt:",
"= self.generate(train) e=time.time() logging.info('load data cost time: {}'.format(e-s)) if x.shape[0]",
"format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') import time class",
"max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample)",
"pin in self.phone_featurizer.vocab_array: phones += [pin] else: phones += list(pin)",
"False for n in txt: if n in vocab_list: pass",
"txt = list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if",
"== batch: break if train and self.augment.available(): sample = random.sample(sample,",
"self.speech_config['wav_max_duration']: logging.info( '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if",
"if training: with open(train_list, encoding='utf-8') as f: train_list = f.readlines()",
"import numpy as np import pypinyin import tensorflow as tf",
"open(test_list, encoding='utf-8') as f: data = f.readlines() data = [i.strip()",
"list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len <",
"[['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']],",
"input_length, phones, phones_length,txts def generator(self, train=True): while 1: s=time.time() x,",
"[] phones_length = [] txts = [] max_input = 0",
"'水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt)",
"i != ''] self.test_list = data self.train_offset = 0 self.test_offset",
"in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array) is not",
"self.speech_featurizer.load_wav(wp) except: continue if len(data) < 400: logging.info('{} wav too",
"import logging import random import numpy as np import pypinyin",
"tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self): return",
"= [i.strip() for i in data if i != '']",
"if i != ''] self.train_list = train_list np.random.shuffle(self.train_list) with open(test_list,",
"* self.speech_config['wav_max_duration']: logging.info( '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue",
"/ 1000) * self.speech_config['stride_ms']) else: speech_feature = data speech_feature =",
"< len(phone_feature): logging.info('{} feature length < phone length,continue'.format(wp)) continue max_input",
"%(name)s - %(levelname)s - %(message)s') import time class AM_DataLoader(): def",
"*= chunk_times input_length = np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features,",
"phones_length,txts = self.generate(train) e=time.time() logging.info('load data cost time: {}'.format(e-s)) if",
"['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'],",
"input_length = np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones",
"chunk_times input_length = np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input)",
"data = self.speech_featurizer.load_wav(wp) except: continue if len(data) < 400: logging.info('{}",
"'\\u4e00' <= ch <= '\\u9fff': txt += ch else: continue",
"self.speech_config['stride_ms'] max_input = max_input // self.chunk * self.chunk + self.chunk",
"max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if",
"= max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming:",
"with open(test_list, encoding='utf-8') as f: data = f.readlines() data =",
"reduce if self.chunk % reduce != 0: chunk_times += 1",
"self.chunk + self.chunk max_in_len = max_input // self.chunk chunk_times =",
"4 if self.augment.available() else self.batch else: batch = self.batch for",
"= self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config)",
"txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones",
"txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break if self.streaming:",
"if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming: speech_feature =",
"time: {}'.format(e-s)) if x.shape[0] == 0: logging.info('load data length zero,continue')",
"phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break if self.streaming: max_input",
"!= ''] self.test_list = data self.test_offset = 0 def only_chinese(self,",
"np.array(phones_length, 'int32') return x, input_length, phones, phones_length, txts def check_valid(self,",
"= config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming",
"'{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt",
"self.generate(train) e=time.time() logging.info('load data cost time: {}'.format(e-s)) if x.shape[0] ==",
"continue if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming: speech_feature",
"[] max_input = 0 if train: batch = self.batch *",
"list(pin) # print(phones) return phones self.text_to_vocab = text_to_vocab_func def make_file_list(self,",
"[pin] else: phones += list(pin) # print(phones) return phones self.text_to_vocab",
"txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post', value=self.text_featurizer.pad)",
"data / np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1) in_len = len(speech_feature)",
"3 // 4 if self.augment.available() else self.batch else: batch =",
"self.chunk % reduce != 0: chunk_times += 1 in_len *=",
"length,continue'.format(wp)) continue max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature))",
"import Augmentation from utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers import TextFeaturizer",
"0: return False for n in txt: if n in",
"for i in data if i != ''] self.test_list =",
"self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1 self.steps = 0",
"make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with open(train_list, encoding='utf-8') as",
"batch = self.batch for i in range(batch * 10): if",
"pypinyin.pinyin(txt) pins = [i[0] for i in pins] phones =",
"'肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传':",
"1 if self.test_offset > len(self.test_list) - 1: self.test_offset = 0",
"return True def generate(self, train=True): sample = [] speech_features =",
"line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: logging.info('{} load data failed,skip'.format(wp))",
"self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer",
"phones = np.array(phones, 'int32') txts = np.array(txts, 'int32') input_length =",
"for i in pins] phones = [] for pin in",
"return ( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch,",
"range(batch * 10): if train: line = self.train_list[self.train_offset] self.train_offset +=",
"'int32') phones_length = np.array(phones_length, 'int32') return x, input_length, phones, phones_length,txts",
"except: logging.info('{} load data failed,skip'.format(wp)) continue if len(data) < 400:",
"phones_length = np.array(phones_length, 'int32') return x, input_length, phones, phones_length,txts def",
"= list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len",
"in txts]), padding='post', value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones =",
"< 25ms,skip'.format(wp)) continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue",
"if x.shape[0] == 0: logging.info('load data length zero,continue') continue yield",
"= 0 self.test_offset = 0 logging.info('load train list {} test",
"if train: batch = self.batch * 3 // 4 if",
"= Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1 self.steps = 0 def",
"1000) * \\ self.speech_config['stride_ms'] max_input = max_input // self.chunk *",
"400: logging.info('{} wav too short < 25ms,skip'.format(wp)) continue elif len(data)",
"= TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training) self.augment = Augmentation(self.augment_config)",
"'调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'], ['àn']], '肖':",
"= 0 np.random.shuffle(self.train_list) self.epochs += 1 else: line = self.test_list[self.test_offset]",
"if len(sample) == batch: break if train and self.augment.available(): sample",
"= data speech_feature = np.expand_dims(speech_feature, -1) reduce = self.speech_config['reduction_factor'] *",
"input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break",
"self.batch def eval_per_epoch_steps(self): return len(self.test_list) // self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大':",
"* self.chunk + self.chunk max_in_len = max_input // self.chunk chunk_times",
"self.steps = 0 def return_data_types(self): return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32)",
"in range(batch * 10): line = self.test_list[self.test_offset] self.test_offset += 1",
"% self.chunk != 0: in_len += 1 chunk_times = self.chunk",
"self.speech_config['stride_ms']) else: speech_feature = data speech_feature = np.expand_dims(speech_feature, -1) reduce",
"% reduce != 0: chunk_times += 1 max_in_len *= chunk_times",
"self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list(",
"self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming: speech_feature = data",
"{} txt {} not all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue",
"[['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']], '水浒传': [['shuǐ'],",
"1 chunk_times = self.chunk // reduce if self.chunk % reduce",
"self.speech_config['stride_ms'] in_len = len(speech_feature) // self.chunk if len(speech_feature) % self.chunk",
"config_dict, training=True): self.speech_config = config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config =",
"+= 1 max_in_len *= chunk_times input_length = np.clip(input_length, 0, max_in_len)",
"speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i",
"continue if len(data) < 400: continue elif len(data) > self.speech_featurizer.sample_rate",
"Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1 self.steps = 0 def return_data_types(self):",
"if self.check_valid(py, self.phone_featurizer.vocab_array) is not True: logging.info(' {} txt phone",
"\\ self.speech_config['stride_ms'] max_input = max_input // self.chunk * self.chunk +",
"value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones, 'int32') txts",
"self.make_file_list( training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1 self.steps",
"if self.test_offset > len(self.test_list) - 1: self.test_offset = 0 wp,",
"self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {} txt phone {}",
"as f: train_list = f.readlines() train_list = [i.strip() for i",
"vocab_list): if len(txt) == 0: return False for n in",
"!= ''] self.train_list = train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as",
"return txt def eval_data_generator(self): sample = [] speech_features = []",
"phones += list(pin) # print(phones) return phones self.text_to_vocab = text_to_vocab_func",
"if len(speech_feature) % self.chunk != 0: in_len += 1 chunk_times",
"for ch in word: if '\\u4e00' <= ch <= '\\u9fff':",
"1 in_len *= chunk_times py = self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array)",
"{}'.format(e-s)) if x.shape[0] == 0: logging.info('load data length zero,continue') continue",
"self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]),",
"train=True): sample = [] speech_features = [] input_length = []",
"return x, input_length, phones, phones_length,txts def generator(self, train=True): while 1:",
"len(speech_feature) // ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms'])",
"if i != ''] self.test_list = data self.train_offset = 0",
"elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{} duration out",
"max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad)",
"in data if i != ''] self.test_list = data self.train_offset",
"self.epochs += 1 else: line = self.test_list[self.test_offset] self.test_offset += 1",
"txt = '' for ch in word: if '\\u4e00' <=",
"break if self.streaming: max_input = max_input // self.chunk * self.chunk",
"self.phone_featurizer.vocab_array: phones += [pin] else: phones += list(pin) # print(phones)",
"+= [pin] else: phones += list(pin) # print(phones) return phones",
"else: continue return txt def eval_data_generator(self): sample = [] speech_features",
"''] self.test_list = data self.test_offset = 0 def only_chinese(self, word):",
"= 0 batch = self.batch for i in range(batch *",
"padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]),",
"as f: data = f.readlines() data = [i.strip() for i",
"0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in",
"[['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗': [['tiáo'],",
"speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce = self.speech_config['reduction_factor']",
"= [] for pin in pins: if pin in self.phone_featurizer.vocab_array:",
"for i in range(batch * 10): if train: line =",
"x = np.array(speech_features, 'float32') phones = np.array(phones, 'int32') txts =",
"= data / np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1) in_len =",
"max_input // self.chunk * self.chunk + self.chunk max_in_len = max_input",
"self.test_offset = 0 wp, txt = line.strip().split('\\t') try: data =",
"['zhuàn']], '水浒传': [['shuǐ'], ['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt): pins =",
"pins = pypinyin.pinyin(txt) pins = [i[0] for i in pins]",
"maxlen=max([len(i) for i in phones]), padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts,",
"10): line = self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset >",
"self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not self.streaming:",
"pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'], ['liàng']], '调暗':",
"np.array(input_length, 'int32') phones_length = np.array(phones_length, 'int32') return x, input_length, phones,",
"len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) if self.streaming: reduce =",
"else: batch = self.batch for i in range(batch * 10):",
"logging.info('{} wav too short < 25ms,skip'.format(wp)) continue elif len(data) >",
"self.phone_featurizer.vocab_array) is not True: logging.info(' {} txt phone {} not",
"def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with open(train_list, encoding='utf-8')",
"// self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']],",
"'int32') txts = np.array(txts, 'int32') input_length = np.array(input_length, 'int32') phones_length",
"in train_list if i != ''] self.train_list = train_list np.random.shuffle(self.train_list)",
"[] speech_features = [] input_length = [] phones = []",
"= max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if",
"self.batch for i in range(batch * 10): if train: line",
"+= 1 if self.train_offset > len(self.train_list) - 1: self.train_offset =",
"self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {}",
"self.only_chinese(txt) if not self.streaming: speech_feature = data / np.abs(data).max() speech_feature",
"in word: if '\\u4e00' <= ch <= '\\u9fff': txt +=",
"= [] speech_features = [] input_length = [] phones =",
"generator(self, train=True): while 1: s=time.time() x, input_length, phones, phones_length,txts =",
"// self.chunk chunk_times = self.chunk // reduce if self.chunk %",
"np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as f: data = f.readlines() data",
"+= ch else: continue return txt def eval_data_generator(self): sample =",
"self.test_offset = 0 def only_chinese(self, word): txt = '' for",
"// 4 if self.augment.available() else self.batch else: batch = self.batch",
"tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in phones]), padding='post', value=self.phone_featurizer.pad) txts =",
"x, input_length, phones, phones_length, txts def check_valid(self, txt, vocab_list): if",
"in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad) x = np.array(speech_features,",
"# print(phones) return phones self.text_to_vocab = text_to_vocab_func def make_file_list(self, training=True):",
"phones self.text_to_vocab = text_to_vocab_func def make_file_list(self, training=True): train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if",
"word): txt = '' for ch in word: if '\\u4e00'",
"train_list=self.speech_config['train_list'] test_list=self.speech_config['eval_list'] if training: with open(train_list, encoding='utf-8') as f: train_list",
"0 np.random.shuffle(self.train_list) self.epochs += 1 else: line = self.test_list[self.test_offset] self.test_offset",
"= 0 logging.info('load train list {} test list {}'.format(len(self.train_list), len(self.test_list)))",
"and self.augment.available(): sample = random.sample(sample, self.batch // 4) for i",
"feature length < phone length,continue'.format(wp)) continue max_input = max(max_input, len(speech_feature))",
"def eval_per_epoch_steps(self): return len(self.test_list) // self.batch def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'],",
"not all in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt = list(txt)",
"= [i[0] for i in pins] phones = [] for",
"1 max_in_len *= chunk_times input_length = np.clip(input_length, 0, max_in_len) speech_features",
"= self.speech_featurizer.load_wav(wp) except: continue if len(data) < 400: logging.info('{} wav",
"in pins] phones = [] for pin in pins: if",
"self.train_list = train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as f: data",
"for i in txts]), padding='post', value=self.text_featurizer.pad) x = np.array(speech_features, 'float32')",
"from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s",
"tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self): return len(self.train_list) // self.batch def",
"= np.array(phones_length, 'int32') return x, input_length, phones, phones_length, txts def",
"self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] max_input =",
"txt = line.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: logging.info('{} load",
"= TextFeaturizer(self.text_config) self.make_file_list( training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs =",
"np.expand_dims(speech_feature, -1) in_len = len(speech_feature) // ( self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate",
"if n in vocab_list: pass else: return n return True",
"= random.sample(sample, self.batch // 4) for i in sample: wp,",
"phones, phones_length,txts def generator(self, train=True): while 1: s=time.time() x, input_length,",
"def generator(self, train=True): while 1: s=time.time() x, input_length, phones, phones_length,txts",
"= self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for",
"* \\ self.speech_config['stride_ms'] max_input = max_input // self.chunk * self.chunk",
"self.test_offset += 1 if self.test_offset > len(self.test_list) - 1: self.test_offset",
"phone length,continue'.format(wp)) continue max_input = max(max_input, len(speech_feature)) speech_features.append(speech_feature) input_length.append(in_len) phones.append(np.array(phone_feature))",
"]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self):",
"*= chunk_times py = self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is not",
"max_input = 0 if train: batch = self.batch * 3",
"= self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for i in",
"is not True: logging.info(' {} txt {} not all in",
"tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), ) def",
"0: in_len += 1 chunk_times = self.chunk // reduce if",
"vocab_list: pass else: return n return True def generate(self, train=True):",
"too short < 25ms,skip'.format(wp)) continue elif len(data) > self.speech_featurizer.sample_rate *",
"text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins = [i[0] for i in",
"of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration'])) continue if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if",
"txts = np.array(txts, 'int32') input_length = np.array(input_length, 'int32') phones_length =",
"self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: logging.info( '{} duration out of wav_max_duration({}),skip'.format(wp, self.speech_config['wav_max_duration']))",
"self.batch * 3 // 4 if self.augment.available() else self.batch else:",
"['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins =",
"for i in sample: wp, txt = i.strip().split('\\t') try: data",
"self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is not True: logging.info(' {} txt",
"all in tokens,continue'.format(txt, self.check_valid(py, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature",
"line = self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset > len(self.test_list)",
"self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms']) else: speech_feature =",
"not self.streaming: speech_feature = data / np.abs(data).max() speech_feature = np.expand_dims(speech_feature,",
"%(message)s') import time class AM_DataLoader(): def __init__(self, config_dict, training=True): self.speech_config",
"= self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000) * \\ self.speech_config['stride_ms'] in_len",
"if self.check_valid(txt, self.text_featurizer.vocab_array) is not True: logging.info(' {} txt phone",
"['zhuàn']] }) def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins = [i[0]",
"def get_per_epoch_steps(self): return len(self.train_list) // self.batch def eval_per_epoch_steps(self): return len(self.test_list)",
"+= 1 else: line = self.test_list[self.test_offset] self.test_offset += 1 if",
"0 def only_chinese(self, word): txt = '' for ch in",
"= tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post', value=self.text_featurizer.pad) x",
"''] self.train_list = train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as f:",
"input_length, phones, phones_length, txts def check_valid(self, txt, vocab_list): if len(txt)",
"in_len *= chunk_times py = self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is",
"self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer = TextFeaturizer(self.phone_config)",
"i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad) x =",
"with open(train_list, encoding='utf-8') as f: train_list = f.readlines() train_list =",
"phones_length,txts def generator(self, train=True): while 1: s=time.time() x, input_length, phones,",
"* self.speech_config['wav_max_duration']: continue data = self.augment.process(data) if self.speech_config['only_chinese']: txt =",
"train_list if i != ''] self.train_list = train_list np.random.shuffle(self.train_list) with",
"/ 1000) * \\ self.speech_config['stride_ms'] max_input = max_input // self.chunk",
"}) def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins = [i[0] for",
"len(self.test_list) - 1: self.test_offset = 0 wp, txt = line.strip().split('\\t')",
"TextFeaturizer(self.phone_config) self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab()",
"not True: logging.info(' {} txt {} not all in tokens,continue'.format(txt,",
"self.check_valid(py, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature = self.phone_featurizer.extract(py) text_feature",
"[] txts = [] max_input = 0 if train: batch",
"i in data if i != ''] self.test_list = data",
"for n in txt: if n in vocab_list: pass else:",
"(tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self): return ( tf.TensorShape([self.batch, None,",
"test list {}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list, encoding='utf-8') as f:",
"(self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms']) else: speech_feature = data speech_feature",
"short < 25ms,skip'.format(wp)) continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']:",
"self.speech_featurizer.load_wav(wp) except: logging.info('{} load data failed,skip'.format(wp)) continue if len(data) <",
"= train_list np.random.shuffle(self.train_list) with open(test_list, encoding='utf-8') as f: data =",
"import SpeechFeaturizer from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s",
"max_input) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate / 1000)",
"continue data = self.augment.process(data) if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if",
"= self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature): logging.info('{} feature length <",
"self.batch // 4) for i in sample: wp, txt =",
"= config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket']",
"f: data = f.readlines() data = [i.strip() for i in",
"= [i.strip() for i in train_list if i != '']",
"<= ch <= '\\u9fff': txt += ch else: continue return",
"= self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset > len(self.test_list) -",
"sample.append(line) if len(sample) == batch: break if train and self.augment.available():",
"= self.batch for i in range(batch * 10): line =",
"train=True): while 1: s=time.time() x, input_length, phones, phones_length,txts = self.generate(train)",
"0: logging.info('load data length zero,continue') continue yield x, input_length, phones,",
"txts = [] max_input = 0 if train: batch =",
"\\ self.speech_config['stride_ms'] in_len = len(speech_feature) // self.chunk if len(speech_feature) %",
"list {} test list {}'.format(len(self.train_list), len(self.test_list))) else: with open(test_list, encoding='utf-8')",
"speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce = self.speech_config['reduction_factor'] *",
"= self.speech_config['streaming'] self.chunk = self.speech_config['sample_rate'] * self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size']",
"// self.chunk if len(speech_feature) % self.chunk != 0: in_len +=",
"= f.readlines() train_list = [i.strip() for i in train_list if",
"25ms,skip'.format(wp)) continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data",
"tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]), ) def get_per_epoch_steps(self): return len(self.train_list) //",
"try: data = self.speech_featurizer.load_wav(wp) except: continue if len(data) < 400:",
"self.text_featurizer = TextFeaturizer(self.text_config) self.make_file_list( training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs",
"1 else: line = self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset",
"self.train_offset = 0 self.test_offset = 0 logging.info('load train list {}",
"if train: line = self.train_list[self.train_offset] self.train_offset += 1 if self.train_offset",
"self.chunk != 0: in_len += 1 chunk_times = self.chunk //",
"phones.append(np.array(phone_feature)) txts.append(np.array(text_feature)) phones_length.append(len(phone_feature)) sample.append(line) if len(sample) == batch: break if",
"= self.phone_featurizer.extract(py) text_feature = self.text_featurizer.extract(txt)+[self.text_featurizer.endid()] if in_len < len(phone_feature): logging.info('{}",
"if len(sample) == batch: break if self.streaming: max_input = max_input",
"config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config'] self.streaming = self.speech_config['streaming'] self.chunk =",
"= self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is not True: logging.info(' {}",
"train_list = [i.strip() for i in train_list if i !=",
"+= 1 in_len *= chunk_times py = self.text_to_vocab(txt) if self.check_valid(py,",
"// 4) for i in sample: wp, txt = i.strip().split('\\t')",
"return (tf.float32, tf.int32, tf.int32, tf.int32,tf.int32) def return_data_shape(self): return ( tf.TensorShape([self.batch,",
"all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if self.check_valid(txt, self.text_featurizer.vocab_array) is",
"self.text_featurizer.vocab_array) is not True: logging.info(' {} txt {} not all",
"chunk_times py = self.text_to_vocab(txt) if self.check_valid(py, self.phone_featurizer.vocab_array) is not True:",
"max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i) for",
"max_input // self.chunk chunk_times = self.chunk // reduce if self.chunk",
"1 if self.train_offset > len(self.train_list) - 1: self.train_offset = 0",
"phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue if",
"utils.speech_featurizers import SpeechFeaturizer from utils.text_featurizers import TextFeaturizer logging.basicConfig(level=logging.INFO, format='%(asctime)s -",
"self.speech_featurizer.pad_signal(speech_features, max_input) if self.streaming: reduce = self.speech_config['reduction_factor'] * (self.speech_featurizer.sample_rate /",
"* (self.speech_featurizer.sample_rate / 1000) * self.speech_config['stride_ms']) else: speech_feature = data",
"txt = i.strip().split('\\t') try: data = self.speech_featurizer.load_wav(wp) except: continue if",
"len(data) < 400: logging.info('{} wav too short < 25ms,skip'.format(wp)) continue",
"return_data_shape(self): return ( tf.TensorShape([self.batch, None, 1]), tf.TensorShape([self.batch, ]), tf.TensorShape([self.batch, None]),",
"= self.batch for i in range(batch * 10): if train:",
"speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i)",
"test_list=self.speech_config['eval_list'] if training: with open(train_list, encoding='utf-8') as f: train_list =",
"tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post', value=self.text_featurizer.pad) x =",
"continue elif len(data) > self.speech_featurizer.sample_rate * self.speech_config['wav_max_duration']: continue data =",
"failed,skip'.format(wp)) continue if len(data) < 400: continue elif len(data) >",
"!= 0: chunk_times += 1 max_in_len *= chunk_times input_length =",
"in tokens,continue'.format(txt, self.check_valid(txt, self.text_featurizer.vocab_array))) continue txt = list(txt) phone_feature =",
"'英雄传': [['yīng'], ['xióng'], ['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']],",
"txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py, self.phone_featurizer.vocab_array))) continue",
"logging.info('load data cost time: {}'.format(e-s)) if x.shape[0] == 0: logging.info('load",
"logging.info('{} feature length < phone length,continue'.format(wp)) continue max_input = max(max_input,",
"0: chunk_times += 1 in_len *= chunk_times py = self.text_to_vocab(txt)",
"input_length, phones, phones_length,txts = self.generate(train) e=time.time() logging.info('load data cost time:",
"phones = [] for pin in pins: if pin in",
"= np.clip(input_length, 0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for",
"speech_feature = np.expand_dims(speech_feature, -1) in_len = len(speech_feature) // ( self.speech_config['reduction_factor']",
"AM_DataLoader(): def __init__(self, config_dict, training=True): self.speech_config = config_dict['speech_config'] self.phone_config =",
"['zhuàn']], '新传': [['xīn'], ['zhuàn']], '外传': [['wài'], ['zhuàn']], '正传': [['zhèng'], ['zhuàn']],",
"break if train and self.augment.available(): sample = random.sample(sample, self.batch //",
"training=True): self.speech_config = config_dict['speech_config'] self.phone_config = config_dict['inp_config'] self.text_config = config_dict['tar_config']",
"speech_feature = data / np.abs(data).max() speech_feature = np.expand_dims(speech_feature, -1) in_len",
"value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for i in txts]), padding='post',",
"self.phone_config = config_dict['inp_config'] self.text_config = config_dict['tar_config'] self.running_config=config_dict['running_config'] self.augment_config = config_dict['augments_config']",
"['liàng']], '调暗': [['tiáo'], ['àn']], '肖': [['xiāo']], '英雄传': [['yīng'], ['xióng'], ['zhuàn']],",
"data = self.augment.process(data) if self.speech_config['only_chinese']: txt = self.only_chinese(txt) if not",
"txts]), padding='post', value=self.text_featurizer.pad) x = np.array(speech_features, 'float32') phones = np.array(phones,",
"logging.info(' {} txt phone {} not all in tokens,continue'.format(txt, self.check_valid(py,",
"line = self.train_list[self.train_offset] self.train_offset += 1 if self.train_offset > len(self.train_list)",
"def init_text_to_vocab(self): pypinyin.load_phrases_dict({'调大': [['tiáo'], ['dà']], '调小': [['tiáo'], ['xiǎo']], '调亮': [['tiáo'],",
"import pypinyin import tensorflow as tf from augmentations.augments import Augmentation",
"in self.phone_featurizer.vocab_array: phones += [pin] else: phones += list(pin) #",
"+= 1 if self.test_offset > len(self.test_list) - 1: self.test_offset =",
"0, max_in_len) speech_features = self.speech_featurizer.pad_signal(speech_features, max_input) phones = tf.keras.preprocessing.sequence.pad_sequences(phones, maxlen=max([len(i)",
"i in phones]), padding='post', value=self.phone_featurizer.pad) txts = tf.keras.preprocessing.sequence.pad_sequences(txts, maxlen=max([len(i) for",
"return n return True def generate(self, train=True): sample = []",
"= self.train_list[self.train_offset] self.train_offset += 1 if self.train_offset > len(self.train_list) -",
"* self.speech_config['streaming_bucket'] self.batch = config_dict['running_config']['batch_size'] self.speech_featurizer = SpeechFeaturizer(self.speech_config) self.phone_featurizer =",
"is not True: logging.info(' {} txt phone {} not all",
"chunk_times += 1 in_len *= chunk_times py = self.text_to_vocab(txt) if",
"[['shuǐ'], ['hǔ'], ['zhuàn']] }) def text_to_vocab_func(txt): pins = pypinyin.pinyin(txt) pins",
"= np.expand_dims(speech_feature, -1) in_len = len(speech_feature) // ( self.speech_config['reduction_factor'] *",
"phones, phones_length, txts def check_valid(self, txt, vocab_list): if len(txt) ==",
"self.test_list[self.test_offset] self.test_offset += 1 if self.test_offset > len(self.test_list) - 1:",
"* self.speech_config['stride_ms']) else: speech_feature = data speech_feature = np.expand_dims(speech_feature, -1)",
"def __init__(self, config_dict, training=True): self.speech_config = config_dict['speech_config'] self.phone_config = config_dict['inp_config']",
"data if i != ''] self.test_list = data self.test_offset =",
"%(levelname)s - %(message)s') import time class AM_DataLoader(): def __init__(self, config_dict,",
"phones=tf.keras.preprocessing.sequence.pad_sequences(phones,maxlen=max([len(i) for i in phones]),padding='post',value=self.phone_featurizer.pad) txts=tf.keras.preprocessing.sequence.pad_sequences(txts,maxlen=max([len(i) for i in txts]),padding='post',value=self.text_featurizer.pad)",
"training) self.augment = Augmentation(self.augment_config) self.init_text_to_vocab() self.epochs = 1 self.steps ="
] |
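A minimal sketch of how this loader might be wired into a tf.data pipeline, assuming a config dict that carries the keys read in __init__ (speech_config, inp_config, tar_config, running_config, augments_config); the build_dataset helper and the way the config is obtained are illustrative, not part of the original module:

# Hypothetical wiring sketch; assumes `config` already holds the keys AM_DataLoader reads.
import tensorflow as tf

def build_dataset(config):
    loader = AM_DataLoader(config, training=True)
    # from_generator consumes the endless generator() above and uses the
    # dtypes/shapes the loader reports for its five batched outputs.
    dataset = tf.data.Dataset.from_generator(
        lambda: loader.generator(train=True),
        output_types=loader.return_data_types(),
        output_shapes=loader.return_data_shape(),
    )
    return dataset.prefetch(tf.data.experimental.AUTOTUNE), loader.get_per_epoch_steps()

Because generate() already returns whole padded batches, no extra .batch() call is needed on the dataset; the reported per-epoch step count can be passed to the training loop directly.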
"""empty message

Revision ID: 2018_04_20_data_src_refactor
Revises: 2018_04_11_add_sandbox_topic
Create Date: 2018-04-20 13:03:32.478880

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import ARRAY

revision = '2018_04_20_data_src_refactor'
down_revision = '2018_04_11_add_sandbox_topic'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types')
    op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True))
    op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True))
    op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True))
    op.get_bind()
    op.execute('''
        UPDATE page SET suppression_and_disclosure = suppression_rules WHERE disclosure_control is null;
    ''')
    op.execute('''
        UPDATE page SET suppression_and_disclosure = disclosure_control WHERE suppression_rules is null;
    ''')
    op.execute('''
        UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control)
        WHERE suppression_rules is not null AND disclosure_control is not null;
    ''')
    op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey')
    op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey')
    op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey')
    op.drop_column('page', 'secondary_source_1_date_next_update')
    op.drop_column('page', 'secondary_source_1_date_updated')
    op.drop_column('page', 'secondary_source_1_suppression_rules')
    op.drop_column('page', 'secondary_source_1_disclosure_control')
    op.drop_column('page', 'secondary_source_2_frequency')
    op.drop_column('page', 'secondary_source_2_contact_2_name')
    op.drop_column('page', 'secondary_source_2_contact_2_phone')
    op.drop_column('page', 'secondary_source_2_url')
    op.drop_column('page', 'secondary_source_2_date_next_update')
    op.drop_column('page', 'secondary_source_2_contact_1_name')
    op.drop_column('page', 'last_update_date')
    op.drop_column('page', 'secondary_source_2_contact_1_phone')
    op.drop_column('page', 'secondary_source_2_publisher_text')
    op.drop_column('page', 'secondary_source_2_disclosure_control')
    op.drop_column('page', 'secondary_source_2_type_of_statistic_id')
    op.drop_column('page', 'secondary_source_2_suppression_rules')
    op.drop_column('page', 'secondary_source_2_frequency_other')
    op.drop_column('page', 'secondary_source_2_publisher_id')
    op.drop_column('page', 'secondary_source_2_title')
    op.drop_column('page', 'secondary_source_2_date')
    op.drop_column('page', 'next_update_date')
    op.drop_column('page', 'secondary_source_2_date_updated')
    op.drop_column('page', 'secondary_source_2_statistic_type')
    op.drop_column('page', 'secondary_source_2_frequency_id')
    op.drop_column('page', 'secondary_source_2_contact_2_email')
    op.drop_column('page', 'secondary_source_2_contact_1_email')
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_contact_2_phone', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True))
    op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True))
    op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])
    op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id'])
    op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id'])
    op.drop_column('page', 'secondary_source_1_type_of_data')
    op.drop_column('page', 'suppression_and_disclosure')
    op.drop_column('page', 'note_on_corrections_or_updates')
    op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates')
    op.drop_column('page', 'secondary_source_1_data_source_purpose')
    # ### end Alembic commands ###
"['secondary_source_2_type_of_statistic_id'], ['id']) op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation',",
"'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id']) op.drop_column('page',",
"op.drop_column('page', 'suppression_and_disclosure') op.drop_column('page', 'note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_data_source_purpose') # ###",
"= suppression_rules WHERE disclosure_control is null; ''') op.execute(''' UPDATE page",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_suppression_rules',",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_text', sa.TEXT(), autoincrement=False,",
"op.drop_column('page', 'note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_data_source_purpose') # ### end Alembic",
"WHERE suppression_rules is not null AND disclosure_control is not null;",
"sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page',",
"sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page',",
"'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page', 'secondary_source_2_title') op.drop_column('page', 'secondary_source_2_date') op.drop_column('page', 'next_update_date') op.drop_column('page',",
"= disclosure_control WHERE suppression_rules is null; ''') op.execute(''' UPDATE page",
"= '2018_04_11_add_sandbox_topic' branch_labels = None depends_on = None def upgrade():",
"commands auto generated by Alembic - please adjust! ### type_of_data_types",
"nullable=True)) op.get_bind() op.execute(''' UPDATE page SET suppression_and_disclosure = suppression_rules WHERE",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_disclosure_control', sa.TEXT(),",
"commands auto generated by Alembic - please adjust! ### op.add_column('page',",
"sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id',",
"op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page', 'secondary_source_2_contact_1_email') # ###",
"op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(), autoincrement=False, nullable=True))",
"UPDATE page SET suppression_and_disclosure = disclosure_control WHERE suppression_rules is null;",
"sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_type_of_statistic_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page',",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255),",
"'secondary_source_2_date_updated') op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page', 'secondary_source_2_contact_1_email') #",
"'SURVEY', name='type_of_data_types') op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True)) op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True))",
"op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page', 'secondary_source_2_title') op.drop_column('page', 'secondary_source_2_date') op.drop_column('page', 'next_update_date')",
"depends_on = None def upgrade(): # ### commands auto generated",
"end Alembic commands ### def downgrade(): # ### commands auto",
"op.execute(''' UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' '",
"'secondary_source_2_publisher_text') op.drop_column('page', 'secondary_source_2_disclosure_control') op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page',",
"['secondary_source_2_publisher_id'], ['id']) op.drop_column('page', 'secondary_source_1_type_of_data') op.drop_column('page', 'suppression_and_disclosure') op.drop_column('page', 'note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates')",
"2018_04_11_add_sandbox_topic Create Date: 2018-04-20 13:03:32.478880 \"\"\" from alembic import op",
"Date: 2018-04-20 13:03:32.478880 \"\"\" from alembic import op import sqlalchemy",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(),",
"sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True)) op.get_bind() op.execute(''' UPDATE page",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_url', sa.TEXT(),",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False,",
"' ' || disclosure_control) WHERE suppression_rules is not null AND",
"identifiers, used by Alembic. from sqlalchemy.dialects.postgresql import ARRAY revision =",
"op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_data_source_purpose') # ### end Alembic commands ###",
"op import sqlalchemy as sa # revision identifiers, used by",
"suppression_and_disclosure = disclosure_control WHERE suppression_rules is null; ''') op.execute(''' UPDATE",
"'secondary_source_2_contact_2_phone') op.drop_column('page', 'secondary_source_2_url') op.drop_column('page', 'secondary_source_2_date_next_update') op.drop_column('page', 'secondary_source_2_contact_1_name') op.drop_column('page', 'last_update_date') op.drop_column('page',",
"None depends_on = None def upgrade(): # ### commands auto",
"sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True)) op.get_bind() op.execute(''' UPDATE",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(),",
"'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control') op.drop_column('page', 'secondary_source_2_frequency') op.drop_column('page', 'secondary_source_2_contact_2_name') op.drop_column('page', 'secondary_source_2_contact_2_phone') op.drop_column('page',",
"['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'], ['id']) op.drop_column('page', 'secondary_source_1_type_of_data') op.drop_column('page',",
"op.drop_column('page', 'secondary_source_2_publisher_text') op.drop_column('page', 'secondary_source_2_disclosure_control') op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other')",
"page SET suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control)",
"op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True))",
"disclosure_control is not null; ''') op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('frequency_secondary_source_2_fkey', 'page',",
"'secondary_source_2_disclosure_control') op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page',",
"Create Date: 2018-04-20 13:03:32.478880 \"\"\" from alembic import op import",
"revision = '2018_04_20_data_src_refactor' down_revision = '2018_04_11_add_sandbox_topic' branch_labels = None depends_on",
"AND disclosure_control is not null; ''') op.drop_constraint('organisation_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('frequency_secondary_source_2_fkey',",
"sa.Column('secondary_source_1_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.create_foreign_key('secondary_source_2_type_of_statistic_fkey',",
"'next_update_date') op.drop_column('page', 'secondary_source_2_date_updated') op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page',",
"op.add_column('page', sa.Column('secondary_source_2_contact_1_phone', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True))",
"= None depends_on = None def upgrade(): # ### commands",
"'page', type_='foreignkey') op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update')",
"page SET suppression_and_disclosure = suppression_rules WHERE disclosure_control is null; ''')",
"['id']) op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page', 'organisation', ['secondary_source_2_publisher_id'],",
"\"\"\" from alembic import op import sqlalchemy as sa #",
"op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control') op.drop_column('page', 'secondary_source_2_frequency')",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_title', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255),",
"op.drop_column('page', 'secondary_source_2_contact_2_name') op.drop_column('page', 'secondary_source_2_contact_2_phone') op.drop_column('page', 'secondary_source_2_url') op.drop_column('page', 'secondary_source_2_date_next_update') op.drop_column('page', 'secondary_source_2_contact_1_name')",
"from alembic import op import sqlalchemy as sa # revision",
"op.drop_column('page', 'secondary_source_2_contact_1_phone') op.drop_column('page', 'secondary_source_2_publisher_text') op.drop_column('page', 'secondary_source_2_disclosure_control') op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules')",
"op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True))",
"op.drop_constraint('frequency_secondary_source_2_fkey', 'page', type_='foreignkey') op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated')",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('next_update_date',",
"'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page', 'secondary_source_2_title') op.drop_column('page',",
"suppression_and_disclosure = trim(suppression_rules || ' ' || disclosure_control) WHERE suppression_rules",
"please adjust! ### op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_email',",
"= trim(suppression_rules || ' ' || disclosure_control) WHERE suppression_rules is",
"is null; ''') op.execute(''' UPDATE page SET suppression_and_disclosure = disclosure_control",
"Alembic - please adjust! ### op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True))",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update',",
"sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_suppression_rules',",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(),",
"### type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types') op.add_column('page', sa.Column('secondary_source_1_type_of_data', ARRAY(type_of_data_types), nullable=True))",
"def downgrade(): # ### commands auto generated by Alembic -",
"'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id']) op.create_foreign_key('frequency_secondary_source_2_fkey', 'page', 'frequency_of_release', ['secondary_source_2_frequency_id'], ['id']) op.create_foreign_key('organisation_secondary_source_2_fkey', 'page',",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_disclosure_control',",
"op.add_column('page', sa.Column('secondary_source_1_suppression_rules', sa.TEXT(), autoincrement=False, nullable=True)) op.create_foreign_key('secondary_source_2_type_of_statistic_fkey', 'page', 'type_of_statistic', ['secondary_source_2_type_of_statistic_id'], ['id'])",
"sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page',",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False,",
"disclosure_control is null; ''') op.execute(''' UPDATE page SET suppression_and_disclosure =",
"suppression_rules is not null AND disclosure_control is not null; ''')",
"UPDATE page SET suppression_and_disclosure = trim(suppression_rules || ' ' ||",
"op.execute(''' UPDATE page SET suppression_and_disclosure = disclosure_control WHERE suppression_rules is",
"op.add_column('page', sa.Column('secondary_source_2_statistic_type', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_updated', sa.TEXT(), autoincrement=False, nullable=True))",
"generated by Alembic - please adjust! ### type_of_data_types = sa.Enum('ADMINISTRATIVE',",
"= '2018_04_20_data_src_refactor' down_revision = '2018_04_11_add_sandbox_topic' branch_labels = None depends_on =",
"'page', 'organisation', ['secondary_source_2_publisher_id'], ['id']) op.drop_column('page', 'secondary_source_1_type_of_data') op.drop_column('page', 'suppression_and_disclosure') op.drop_column('page', 'note_on_corrections_or_updates')",
"sa.INTEGER(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_disclosure_control', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_publisher_text',",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_url',",
"sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency',",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True)) op.get_bind()",
"sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_note_on_corrections_or_updates', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True))",
"op.add_column('page', sa.Column('secondary_source_1_data_source_purpose', sa.TEXT(), nullable=True)) op.get_bind() op.execute(''' UPDATE page SET suppression_and_disclosure",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_2_email', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_id', sa.INTEGER(),",
"- please adjust! ### type_of_data_types = sa.Enum('ADMINISTRATIVE', 'SURVEY', name='type_of_data_types') op.add_column('page',",
"'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page', 'secondary_source_2_contact_1_email') # ### end Alembic commands",
"op.drop_column('page', 'secondary_source_1_type_of_data') op.drop_column('page', 'suppression_and_disclosure') op.drop_column('page', 'note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_note_on_corrections_or_updates') op.drop_column('page', 'secondary_source_1_data_source_purpose')",
"'page', type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control')",
"op.drop_column('page', 'next_update_date') op.drop_column('page', 'secondary_source_2_date_updated') op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page', 'secondary_source_2_contact_2_email')",
"op.drop_column('page', 'secondary_source_2_contact_1_name') op.drop_column('page', 'last_update_date') op.drop_column('page', 'secondary_source_2_contact_1_phone') op.drop_column('page', 'secondary_source_2_publisher_text') op.drop_column('page', 'secondary_source_2_disclosure_control')",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False,",
"import ARRAY revision = '2018_04_20_data_src_refactor' down_revision = '2018_04_11_add_sandbox_topic' branch_labels =",
"ARRAY(type_of_data_types), nullable=True)) op.add_column('page', sa.Column('suppression_and_disclosure', sa.TEXT(), nullable=True)) op.add_column('page', sa.Column('note_on_corrections_or_updates', sa.TEXT(), nullable=True))",
"op.drop_constraint('secondary_source_2_type_of_statistic_fkey', 'page', type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page',",
"op.drop_column('page', 'secondary_source_2_contact_1_email') # ### end Alembic commands ### def downgrade():",
"sa.Column('secondary_source_2_contact_1_name', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page',",
"Revises: 2018_04_11_add_sandbox_topic Create Date: 2018-04-20 13:03:32.478880 \"\"\" from alembic import",
"type_='foreignkey') op.drop_column('page', 'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control') op.drop_column('page',",
"UPDATE page SET suppression_and_disclosure = suppression_rules WHERE disclosure_control is null;",
"disclosure_control WHERE suppression_rules is null; ''') op.execute(''' UPDATE page SET",
"nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('next_update_date', sa.VARCHAR(length=255), autoincrement=False,",
"autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255),",
"op.add_column('page', sa.Column('secondary_source_2_publisher_id', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True))",
"'secondary_source_1_date_next_update') op.drop_column('page', 'secondary_source_1_date_updated') op.drop_column('page', 'secondary_source_1_suppression_rules') op.drop_column('page', 'secondary_source_1_disclosure_control') op.drop_column('page', 'secondary_source_2_frequency') op.drop_column('page',",
"op.drop_column('page', 'secondary_source_2_contact_2_email') op.drop_column('page', 'secondary_source_2_contact_1_email') # ### end Alembic commands ###",
"op.drop_column('page', 'secondary_source_2_type_of_statistic_id') op.drop_column('page', 'secondary_source_2_suppression_rules') op.drop_column('page', 'secondary_source_2_frequency_other') op.drop_column('page', 'secondary_source_2_publisher_id') op.drop_column('page', 'secondary_source_2_title')",
"suppression_rules WHERE disclosure_control is null; ''') op.execute(''' UPDATE page SET",
"op.add_column('page', sa.Column('secondary_source_1_date_next_update', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_1_date_updated', sa.TEXT(), autoincrement=False, nullable=True))",
"alembic import op import sqlalchemy as sa # revision identifiers,",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('last_update_date', sa.VARCHAR(length=255), autoincrement=False,",
"op.get_bind() op.execute(''' UPDATE page SET suppression_and_disclosure = suppression_rules WHERE disclosure_control",
"nullable=True)) op.add_column('page', sa.Column('secondary_source_2_frequency_other', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) op.add_column('page', sa.Column('secondary_source_2_suppression_rules', sa.TEXT(), autoincrement=False,",
"# ### end Alembic commands ### def downgrade(): # ###",
"- please adjust! ### op.add_column('page', sa.Column('secondary_source_2_contact_1_email', sa.TEXT(), autoincrement=False, nullable=True)) op.add_column('page',",
"SET suppression_and_disclosure = suppression_rules WHERE disclosure_control is null; ''') op.execute('''",
"'secondary_source_2_date') op.drop_column('page', 'next_update_date') op.drop_column('page', 'secondary_source_2_date_updated') op.drop_column('page', 'secondary_source_2_statistic_type') op.drop_column('page', 'secondary_source_2_frequency_id') op.drop_column('page',"
] |
[
"-*- coding:utf-8 -*- # coded by <NAME> https://github.com/vikas-kundu # -------------------------------------------",
"or (len(sys.argv)==2)): pass else: print(\"Error! Some parameter is missing please",
"installer def options(): argv = sys.argv[1:] try: opts, args =",
"('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in",
"o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else:",
"import config from lib.core.parse import banner from lib.core import util",
"for (o, a) in opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else:",
"(len(sys.argv)==2)): pass else: print(\"Error! Some parameter is missing please check!\")",
"print(\"Packages already installed!\") sys.exit() elif (o in ('-w', '--wizard')): config.wizard=True",
"(o in ('-w', '--wizard')): config.wizard=True elif o in ('-h','--help'): banner.usage()",
"banner from lib.core import util from lib.core import installer def",
"lib.core import installer def options(): argv = sys.argv[1:] try: opts,",
"o in ('-t','--task'): config.str_task=str(a) elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif",
"= getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else: print(\"Error!",
"sys.argv[1:] try: opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or",
"getopt.GetoptError as err: print(err) banner.usage() sys.exit(2) for (o, a) in",
"util from lib.core import installer def options(): argv = sys.argv[1:]",
"opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass",
"# coded by <NAME> https://github.com/vikas-kundu # ------------------------------------------- import sys import",
"a) in opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages already",
"config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong",
"------------------------------------------- import sys import getopt import time import config from",
"import getopt import time import config from lib.core.parse import banner",
"else: print(\"Error! Some parameter is missing please check!\") time.sleep(2) banner.usage()",
"err: print(err) banner.usage() sys.exit(2) for (o, a) in opts: if(o",
"lib.core import util from lib.core import installer def options(): argv",
"print(\"Error! Some parameter is missing please check!\") time.sleep(2) banner.usage() sys.exit()",
"python3 # -*- coding:utf-8 -*- # coded by <NAME> https://github.com/vikas-kundu",
"config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong with argument parsing!\") time.sleep(2) banner.usage()",
"in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something",
"('-m','--mode'): config.str_mode=str(a) elif o in ('-t','--task'): config.str_task=str(a) elif o in",
"('-h','--help'): banner.usage() sys.exit() elif o in ('-m','--mode'): config.str_mode=str(a) elif o",
"o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif",
"import util from lib.core import installer def options(): argv =",
"already installed!\") sys.exit() elif (o in ('-w', '--wizard')): config.wizard=True elif",
"in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong with argument parsing!\")",
"o in ('-h','--help'): banner.usage() sys.exit() elif o in ('-m','--mode'): config.str_mode=str(a)",
"if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages already installed!\") sys.exit() elif",
"<NAME> https://github.com/vikas-kundu # ------------------------------------------- import sys import getopt import time",
"import time import config from lib.core.parse import banner from lib.core",
"elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong with",
"'m:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else: print(\"Error! Some parameter",
"banner.usage() sys.exit(2) for (o, a) in opts: if(o in('-i','--install')): if(util.packages_check()==False):",
"is missing please check!\") time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError as",
"in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o",
"config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'):",
"config.str_mode=str(a) elif o in ('-t','--task'): config.str_task=str(a) elif o in ('-c','--country'):",
"print(err) banner.usage() sys.exit(2) for (o, a) in opts: if(o in('-i','--install')):",
"# -*- coding:utf-8 -*- # coded by <NAME> https://github.com/vikas-kundu #",
"coded by <NAME> https://github.com/vikas-kundu # ------------------------------------------- import sys import getopt",
"o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong with argument",
"elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\''))",
"#!/usr/bin/env python3 # -*- coding:utf-8 -*- # coded by <NAME>",
"Some parameter is missing please check!\") time.sleep(2) banner.usage() sys.exit() except",
"coding:utf-8 -*- # coded by <NAME> https://github.com/vikas-kundu # ------------------------------------------- import",
"sys.exit() except getopt.GetoptError as err: print(err) banner.usage() sys.exit(2) for (o,",
"from lib.core import util from lib.core import installer def options():",
"check!\") time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError as err: print(err) banner.usage()",
"elif o in ('-h','--help'): banner.usage() sys.exit() elif o in ('-m','--mode'):",
"['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else: print(\"Error! Some parameter is",
"installer.start_install() else: print(\"Packages already installed!\") sys.exit() elif (o in ('-w',",
"time import config from lib.core.parse import banner from lib.core import",
"sys.exit() elif (o in ('-w', '--wizard')): config.wizard=True elif o in",
"banner.usage() sys.exit() elif o in ('-m','--mode'): config.str_mode=str(a) elif o in",
"sys.exit(2) for (o, a) in opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install()",
"getopt import time import config from lib.core.parse import banner from",
"('-w', '--wizard')): config.wizard=True elif o in ('-h','--help'): banner.usage() sys.exit() elif",
"def options(): argv = sys.argv[1:] try: opts, args = getopt.getopt(argv,",
"if(util.packages_check()==False): installer.start_install() else: print(\"Packages already installed!\") sys.exit() elif (o in",
"import banner from lib.core import util from lib.core import installer",
"time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError as err: print(err) banner.usage() sys.exit(2)",
"pass else: print(\"Error! Some parameter is missing please check!\") time.sleep(2)",
"please check!\") time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError as err: print(err)",
"('-t','--task'): config.str_task=str(a) elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in",
"in ('-w', '--wizard')): config.wizard=True elif o in ('-h','--help'): banner.usage() sys.exit()",
"argv = sys.argv[1:] try: opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install'])",
"as err: print(err) banner.usage() sys.exit(2) for (o, a) in opts:",
"('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went",
"config from lib.core.parse import banner from lib.core import util from",
"from lib.core import installer def options(): argv = sys.argv[1:] try:",
"(o, a) in opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages",
"in ('-h','--help'): banner.usage() sys.exit() elif o in ('-m','--mode'): config.str_mode=str(a) elif",
"getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else: print(\"Error! Some",
"args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else:",
"<reponame>vikas-kundu/phonedict<filename>lib/core/parse/cmdline.py<gh_stars>0 #!/usr/bin/env python3 # -*- coding:utf-8 -*- # coded by",
"except getopt.GetoptError as err: print(err) banner.usage() sys.exit(2) for (o, a)",
"sys import getopt import time import config from lib.core.parse import",
"try: opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9) or (len(sys.argv)==2)):",
"in ('-m','--mode'): config.str_mode=str(a) elif o in ('-t','--task'): config.str_task=str(a) elif o",
"installed!\") sys.exit() elif (o in ('-w', '--wizard')): config.wizard=True elif o",
"config.str_task=str(a) elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o in ('-o','--output'):",
"in opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages already installed!\")",
"lib.core.parse import banner from lib.core import util from lib.core import",
"in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages already installed!\") sys.exit() elif (o",
"else: print(\"Packages already installed!\") sys.exit() elif (o in ('-w', '--wizard')):",
"= sys.argv[1:] try: opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi', ['mode','task','country','output','number','wizard','help','install']) if((len(sys.argv)==9)",
"'--wizard')): config.wizard=True elif o in ('-h','--help'): banner.usage() sys.exit() elif o",
"import installer def options(): argv = sys.argv[1:] try: opts, args",
"by <NAME> https://github.com/vikas-kundu # ------------------------------------------- import sys import getopt import",
"import sys import getopt import time import config from lib.core.parse",
"# ------------------------------------------- import sys import getopt import time import config",
"else: print(\"Something went wrong with argument parsing!\") time.sleep(2) banner.usage() sys.exit()",
"https://github.com/vikas-kundu # ------------------------------------------- import sys import getopt import time import",
"elif o in ('-t','--task'): config.str_task=str(a) elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\''))",
"elif (o in ('-w', '--wizard')): config.wizard=True elif o in ('-h','--help'):",
"elif o in ('-m','--mode'): config.str_mode=str(a) elif o in ('-t','--task'): config.str_task=str(a)",
"if((len(sys.argv)==9) or (len(sys.argv)==2)): pass else: print(\"Error! Some parameter is missing",
"('-n','--number'): config.str_number=str(a.strip('\"\\'')) else: print(\"Something went wrong with argument parsing!\") time.sleep(2)",
"options(): argv = sys.argv[1:] try: opts, args = getopt.getopt(argv, 'm:t:c:o:n:whi',",
"from lib.core.parse import banner from lib.core import util from lib.core",
"opts: if(o in('-i','--install')): if(util.packages_check()==False): installer.start_install() else: print(\"Packages already installed!\") sys.exit()",
"in ('-t','--task'): config.str_task=str(a) elif o in ('-c','--country'): config.str_country=str(a.lower().strip('\"\\'')) elif o",
"o in ('-m','--mode'): config.str_mode=str(a) elif o in ('-t','--task'): config.str_task=str(a) elif",
"config.wizard=True elif o in ('-h','--help'): banner.usage() sys.exit() elif o in",
"missing please check!\") time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError as err:",
"elif o in ('-o','--output'): config.str_output=str(a.strip('\"\\'')) elif o in ('-n','--number'): config.str_number=str(a.strip('\"\\''))",
"parameter is missing please check!\") time.sleep(2) banner.usage() sys.exit() except getopt.GetoptError",
"banner.usage() sys.exit() except getopt.GetoptError as err: print(err) banner.usage() sys.exit(2) for",
"sys.exit() elif o in ('-m','--mode'): config.str_mode=str(a) elif o in ('-t','--task'):",
"-*- # coded by <NAME> https://github.com/vikas-kundu # ------------------------------------------- import sys"
] |
[
") self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\" ) self.assertIsNone( ssh_utils._to_paramiko_private_key(private_key_filename=None, password='<PASSWORD>') )",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"# # Licensed under the Apache License, Version 2.0 (the",
"compliance with the License. # You may obtain a copy",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"2.0 (the \"License\"); # you may not use this file",
"agreed to in writing, software # distributed under the License",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Unless required by applicable law or agreed to in writing,",
"Copyright 2015 - StackStorm, Inc. # Copyright 2015 - Huawei",
"base from mistral.utils import ssh_utils from mistral_lib import utils class",
"distributed under the License is distributed on an \"AS IS\"",
"as exc from mistral.tests.unit import base from mistral.utils import ssh_utils",
"UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass class B(A): pass class",
"the specific language governing permissions and # limitations under the",
"def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key,",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"# Copyright 2015 - Huawei Technologies Co. Ltd # #",
"applicable law or agreed to in writing, software # distributed",
"express or implied. # See the License for the specific",
"test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\"",
"except in compliance with the License. # You may obtain",
"- Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. #",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"class D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises(",
"from mistral.utils import ssh_utils from mistral_lib import utils class UtilsTest(base.BaseTest):",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"pass class B(A): pass class C(A): pass class D(C): pass",
"pass class C(A): pass class D(C): pass self.assertEqual([B, C, D],",
"not use this file except in compliance with the License.",
"writing, software # distributed under the License is distributed on",
"in writing, software # distributed under the License is distributed",
"you may not use this file except in compliance with",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException,",
"Mirantis, Inc. # Copyright 2015 - StackStorm, Inc. # Copyright",
"language governing permissions and # limitations under the License. from",
"exceptions as exc from mistral.tests.unit import base from mistral.utils import",
"limitations under the License. from mistral import exceptions as exc",
"Inc. # Copyright 2015 - Huawei Technologies Co. Ltd #",
"use this file except in compliance with the License. #",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"mistral.tests.unit import base from mistral.utils import ssh_utils from mistral_lib import",
"Co. Ltd # # Licensed under the Apache License, Version",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"Ltd # # Licensed under the Apache License, Version 2.0",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"or implied. # See the License for the specific language",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License. # You may obtain a copy of the License",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"License, Version 2.0 (the \"License\"); # you may not use",
"Copyright 2015 - Huawei Technologies Co. Ltd # # Licensed",
"# You may obtain a copy of the License at",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"from mistral_lib import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object):",
"mistral_lib import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass",
"under the License is distributed on an \"AS IS\" BASIS,",
"import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass class",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"\"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\" ) self.assertIsNone( ssh_utils._to_paramiko_private_key(private_key_filename=None, password='<PASSWORD>')",
"License for the specific language governing permissions and # limitations",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"B(A): pass class C(A): pass class D(C): pass self.assertEqual([B, C,",
"class B(A): pass class C(A): pass class D(C): pass self.assertEqual([B,",
"the License for the specific language governing permissions and #",
"(the \"License\"); # you may not use this file except",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"# you may not use this file except in compliance",
"ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\" ) self.assertIsNone( ssh_utils._to_paramiko_private_key(private_key_filename=None,",
"either express or implied. # See the License for the",
"# limitations under the License. from mistral import exceptions as",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"Huawei Technologies Co. Ltd # # Licensed under the Apache",
"the License is distributed on an \"AS IS\" BASIS, #",
"list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException,",
"and # limitations under the License. from mistral import exceptions",
"in compliance with the License. # You may obtain a",
"2015 - StackStorm, Inc. # Copyright 2015 - Huawei Technologies",
"- Huawei Technologies Co. Ltd # # Licensed under the",
"software # distributed under the License is distributed on an",
"2013 - Mirantis, Inc. # Copyright 2015 - StackStorm, Inc.",
"License. from mistral import exceptions as exc from mistral.tests.unit import",
"import base from mistral.utils import ssh_utils from mistral_lib import utils",
"exc from mistral.tests.unit import base from mistral.utils import ssh_utils from",
"mistral.utils import ssh_utils from mistral_lib import utils class UtilsTest(base.BaseTest): def",
"# # Unless required by applicable law or agreed to",
"pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key,",
"the License. from mistral import exceptions as exc from mistral.tests.unit",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"2015 - Huawei Technologies Co. Ltd # # Licensed under",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"Version 2.0 (the \"License\"); # you may not use this",
"mistral import exceptions as exc from mistral.tests.unit import base from",
"law or agreed to in writing, software # distributed under",
"class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass class B(A): pass",
"ssh_utils from mistral_lib import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class",
"governing permissions and # limitations under the License. from mistral",
"import ssh_utils from mistral_lib import utils class UtilsTest(base.BaseTest): def test_itersubclasses(self):",
"def test_itersubclasses(self): class A(object): pass class B(A): pass class C(A):",
"class C(A): pass class D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A)))",
"implied. # See the License for the specific language governing",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"\"License\"); # you may not use this file except in",
"test_itersubclasses(self): class A(object): pass class B(A): pass class C(A): pass",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"from mistral import exceptions as exc from mistral.tests.unit import base",
"StackStorm, Inc. # Copyright 2015 - Huawei Technologies Co. Ltd",
"C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" )",
"A(object): pass class B(A): pass class C(A): pass class D(C):",
"# Copyright 2015 - StackStorm, Inc. # Copyright 2015 -",
"# Copyright 2013 - Mirantis, Inc. # Copyright 2015 -",
"self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\"",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"Inc. # Copyright 2015 - StackStorm, Inc. # Copyright 2015",
"from mistral.tests.unit import base from mistral.utils import ssh_utils from mistral_lib",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"to in writing, software # distributed under the License is",
"import exceptions as exc from mistral.tests.unit import base from mistral.utils",
"pass class D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self):",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"# See the License for the specific language governing permissions",
"D], list(utils.iter_subclasses(A))) def test_paramiko_to_private_key(self): self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises(",
"Copyright 2013 - Mirantis, Inc. # Copyright 2015 - StackStorm,",
"You may obtain a copy of the License at #",
"class A(object): pass class B(A): pass class C(A): pass class",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"required by applicable law or agreed to in writing, software",
"under the License. from mistral import exceptions as exc from",
"utils class UtilsTest(base.BaseTest): def test_itersubclasses(self): class A(object): pass class B(A):",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"Technologies Co. Ltd # # Licensed under the Apache License,",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"with the License. # You may obtain a copy of",
"C(A): pass class D(C): pass self.assertEqual([B, C, D], list(utils.iter_subclasses(A))) def",
"this file except in compliance with the License. # You",
"- StackStorm, Inc. # Copyright 2015 - Huawei Technologies Co.",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"permissions and # limitations under the License. from mistral import",
"self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\" )",
"exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"../dir\" ) self.assertRaises( exc.DataAccessException, ssh_utils._to_paramiko_private_key, \"..\\\\dir\" ) self.assertIsNone("
] |
import xmltodict
import json

from .models import Tunein
from .utils import _init_session
from .Exceptions import APIException

base_url = 'http://api.shoutcast.com'
tunein_url = 'http://yp.shoutcast.com/{base}?id={id}'
tuneins = [Tunein('/sbin/tunein-station.pls'),
           Tunein('/sbin/tunein-station.m3u'),
           Tunein('/sbin/tunein-station.xspf')]


def call_api_xml(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        response_as_dict = xmltodict.parse(response.content)
        api_response = response_as_dict.get('response')
        if api_response:
            # An embedded <response> element carries an API-level error.
            api_status_code = int(api_response.get('statusCode'))
            message = "statusText:{}, statusDetailText:{}".format(
                api_response.get('statusText'),
                api_response.get('statusDetailText')
            )
            raise APIException(message, code=api_status_code)
        return response_as_dict
    raise APIException(response.content, code=response.status_code)


def call_api_json(endpoint, params=None, session=None):
    session = _init_session(session)
    request_url = "{}{}".format(base_url, endpoint)
    response = session.get(request_url, params=params)
    if response.status_code == 200:
        json_response = json.loads(response.content.decode('utf-8'))
        api_response = json_response.get('response')
        api_status_code = int(api_response.get('statusCode'))
        if api_status_code != 200:
            message = "statusText:{}, statusDetailText:{}".format(
                api_response.get('statusText'),
                api_response.get('statusDetailText', '')
            )
            raise APIException(message, code=api_status_code)
        return json_response.get('response')['data']
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein(station_id: int, session=None):
    session = _init_session(session)
    # tuneins[2] is the XSPF playlist base, which is XML and can be parsed.
    url = tunein_url.format(base=tuneins[2], id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        api_response = xmltodict.parse(response.content.decode('utf-8'))
        return api_response
    raise APIException(response.reason, code=response.status_code)


def call_api_tunein_any(base: Tunein, station_id: int, session=None):
    session = _init_session(session)
    url = tunein_url.format(base=base, id=station_id)
    response = session.get(url)
    if response.status_code == 200:
        # Return the raw playlist body; PLS/M3U formats are not XML.
        return response.content.decode('utf-8')
    raise APIException(response.reason, code=response.status_code)
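A short usage sketch for the client above. The module alias, the endpoint path, and the 'k' API-key parameter are illustrative assumptions, not taken from this file.

# Hypothetical: assumes the module is importable as `shoutcast_api`
# and that '/station/randomstations' is a valid endpoint.
import shoutcast_api as api

try:
    data = api.call_api_json('/station/randomstations',
                             params={'k': 'MY_DEV_KEY'})
    print(data)
except api.APIException as exc:
    print('SHOUTcast API error:', exc)

# Parsed XSPF tune-in playlist for a (hypothetical) station id:
playlist = api.call_api_tunein(99497)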
from django.core.management.base import BaseCommand, no_translations
from django.contrib.auth.models import Group
from django.conf import settings
import sys


class Command(BaseCommand):
    def handle(self, *args, **options):
        sys.stdout.write("\nResolving app groups")
        app_list = [app_name.lower()
                    for app_name in settings.ACCESS_CONTROLLED_INSTALLED_APPS]
        for app_name in app_list:
            # get_or_create() returns a (group, created) tuple; unpack it
            # so the log line reports the boolean, not the whole tuple.
            group, created = Group.objects.get_or_create(name=app_name)
            sys.stdout.write(f"\n{app_name}, new={created}")
        sys.stdout.write("\n")
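A hedged invocation sketch: a management command is addressed by its file name under management/commands/, which this file does not reveal, so the command name below is hypothetical, as is the sample setting value.

# settings.py (illustrative): app labels that should each get an auth Group.
ACCESS_CONTROLLED_INSTALLED_APPS = ['reports', 'billing']

# Shell (hypothetical command name):
#   python manage.py sync_app_groups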
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Database code for Swift"""

from contextlib import contextmanager, closing
import hashlib
import logging
import os
from uuid import uuid4
import sys
import time
import errno
import six.moves.cPickle as pickle
from swift import gettext_ as _
from tempfile import mkstemp

from eventlet import sleep, Timeout
import sqlite3

from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.utils import json, Timestamp, renamer, \
    mkdirs, lock_parent_directory, fallocate
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPBadRequest


#: Whether calls will be made to preallocate disk space for database files.
DB_PREALLOCATION = False
#: Timeout for trying to connect to a DB
BROKER_TIMEOUT = 25
#: Pickle protocol to use
PICKLE_PROTOCOL = 2
#: Max size of the .pending file in bytes; above this, pending records merge.
PENDING_CAP = 131072


def utf8encode(*args):
    return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]


def utf8encodekeys(metadata):
    uni_keys = [k for k in metadata if isinstance(k, unicode)]
    for k in uni_keys:
        sv = metadata[k]
        del metadata[k]
        metadata[k.encode('utf-8')] = sv


def _db_timeout(timeout, db_file, call):
    with LockTimeout(timeout, db_file):
        retry_wait = 0.001
        while True:
            try:
                return call()
            except sqlite3.OperationalError as e:
                if 'locked' not in str(e):
                    raise
            sleep(retry_wait)
            retry_wait = min(retry_wait * 2, 0.05)


class DatabaseConnectionError(sqlite3.DatabaseError):
    """More friendly error messages for DB Errors."""

    def __init__(self, path, msg, timeout=0):
        self.path = path
        self.timeout = timeout
        self.msg = msg

    def __str__(self):
        return 'DB connection error (%s, %s):\n%s' % (
            self.path, self.timeout, self.msg)


class DatabaseAlreadyExists(sqlite3.DatabaseError):
    """More friendly error messages for DB Errors."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return 'DB %s already exists' % self.path


class GreenDBConnection(sqlite3.Connection):
    """SQLite DB Connection handler that plays well with eventlet."""

    def __init__(self, database, timeout=None, *args, **kwargs):
        if timeout is None:
            timeout = BROKER_TIMEOUT
        self.timeout = timeout
        self.db_file = database
        super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)

    def cursor(self, cls=None):
        if cls is None:
            cls = GreenDBCursor
        return sqlite3.Connection.cursor(self, cls)

    def commit(self):
        return _db_timeout(
            self.timeout, self.db_file,
            lambda: sqlite3.Connection.commit(self))


class GreenDBCursor(sqlite3.Cursor):
    """SQLite Cursor handler that plays well with eventlet."""

    def __init__(self, *args, **kwargs):
        self.timeout = args[0].timeout
        self.db_file = args[0].db_file
        super(GreenDBCursor, self).__init__(*args, **kwargs)

    def execute(self, *args, **kwargs):
        return _db_timeout(
            self.timeout, self.db_file, lambda: sqlite3.Cursor.execute(
                self, *args, **kwargs))


def dict_factory(crs, row):
    """
    This should only be used when you need a real dict,
    i.e. when you're going to serialize the results.
    """
    return dict(
        ((col[0], row[idx]) for idx, col in enumerate(crs.description)))


def chexor(old, name, timestamp):
    """
    Each entry in the account and container databases is XORed by the
    128-bit hash on insert or delete.  This serves as a rolling,
    order-independent hash of the contents. (check + XOR)

    :param old: hex representation of the current DB hash
    :param name: name of the object or container being inserted
    :param timestamp: timestamp of the new record
    :returns: a hex representation of the new hash value
    """
    if name is None:
        raise Exception('name is None!')
    new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()
    return '%032x' % (int(old, 16) ^ int(new, 16))


def get_db_connection(path, timeout=30, okay_to_create=False):
    """
    Returns a properly configured SQLite database connection.

    :param path: path to DB
    :param timeout: timeout for connection
    :param okay_to_create: if True, create the DB if it doesn't exist
    :returns: DB connection object
    """
    try:
        connect_time = time.time()
        conn = sqlite3.connect(path, check_same_thread=False,
                               factory=GreenDBConnection, timeout=timeout)
        if path != ':memory:' and not okay_to_create:
            # attempt to detect and fail when connect creates the db file
            stat = os.stat(path)
            if stat.st_size == 0 and stat.st_ctime >= connect_time:
                os.unlink(path)
                raise DatabaseConnectionError(
                    path, 'DB file created by connect?')
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = NORMAL')
            cur.execute('PRAGMA count_changes = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = DELETE')
        conn.create_function('chexor', 3, chexor)
    except sqlite3.DatabaseError:
        import traceback
        raise DatabaseConnectionError(path, traceback.format_exc(),
                                      timeout=timeout)
    return conn


class DatabaseBroker(object):
    """Encapsulates working with a database."""

    def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None,
                 account=None, container=None, pending_timeout=None,
                 stale_reads_ok=False):
        """Encapsulates working with a database."""
        self.conn = None
        self.db_file = db_file
        self.pending_file = self.db_file + '.pending'
        self.pending_timeout = pending_timeout or 10
        self.stale_reads_ok = stale_reads_ok
        self.db_dir = os.path.dirname(db_file)
        self.timeout = timeout
        self.logger = logger or logging.getLogger()
        self.account = account
        self.container = container
        self._db_version = -1

    def __str__(self):
        """
        Returns a string identifying the entity under broker to a human.
        The baseline implementation returns a full pathname to a database.
        This is vital for useful diagnostics.
        """
        return self.db_file

    def initialize(self, put_timestamp=None, storage_policy_index=None):
        """
        Create the DB

        The storage_policy_index is passed through to the subclass's
        ``_initialize`` method.  It is ignored by ``AccountBroker``.

        :param put_timestamp: internalized timestamp of initial PUT request
        :param storage_policy_index: only required for containers
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = Timestamp(0).internal
        self._initialize(conn, put_timestamp,
                         storage_policy_index=storage_policy_index)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn

    def delete_db(self, timestamp):
        """
        Mark the DB as deleted

        :param timestamp: internalized delete timestamp
        """
        # first, clear the metadata
        cleared_meta = {}
        for k in self.metadata:
            cleared_meta[k] = ('', timestamp)
        self.update_metadata(cleared_meta)
        # then mark the db as deleted
        with self.get() as conn:
            self._delete_db(conn, timestamp)
            conn.commit()

    def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
        """
        Checks the exception info to see if it indicates a quarantine
        situation (malformed or corrupted database). If not, the original
        exception will be reraised. If so, the database will be quarantined
        and a new sqlite3.DatabaseError will be raised indicating the action
        taken.
        """
        if 'database disk image is malformed' in str(exc_value):
            exc_hint = 'malformed'
        elif 'file is encrypted or is not a database' in str(exc_value):
            exc_hint = 'corrupted'
        elif 'disk I/O error' in str(exc_value):
            exc_hint = 'disk error while accessing'
        else:
            raise exc_type, exc_value, exc_traceback
        prefix_path = os.path.dirname(self.db_dir)
        partition_path = os.path.dirname(prefix_path)
        dbs_path = os.path.dirname(partition_path)
        device_path = os.path.dirname(dbs_path)
        quar_path = os.path.join(device_path, 'quarantined',
                                 self.db_type + 's',
                                 os.path.basename(self.db_dir))
        try:
            renamer(self.db_dir, quar_path, fsync=False)
        except OSError as e:
            if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
                raise
            quar_path = "%s-%s" % (quar_path, uuid4().hex)
            renamer(self.db_dir, quar_path, fsync=False)
        detail = _('Quarantined %s to %s due to %s database') % \
            (self.db_dir, quar_path, exc_hint)
        self.logger.error(detail)
        raise sqlite3.DatabaseError(detail)

    @contextmanager
    def get(self):
        """Use with the "with" statement; returns a database connection."""
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                try:
                    self.conn = get_db_connection(self.db_file, self.timeout)
                except (sqlite3.DatabaseError, DatabaseConnectionError):
                    self.possibly_quarantine(*sys.exc_info())
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        conn = self.conn
        self.conn = None
        try:
            yield conn
            conn.rollback()
            self.conn = conn
        except sqlite3.DatabaseError:
            try:
                conn.close()
            except Exception:
                pass
            self.possibly_quarantine(*sys.exc_info())
        except (Exception, Timeout):
            conn.close()
            raise

    @contextmanager
    def lock(self):
        """Use with the "with" statement; locks a database."""
        if not self.conn:
            if self.db_file != ':memory:' and os.path.exists(self.db_file):
                self.conn = get_db_connection(self.db_file, self.timeout)
            else:
                raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        conn = self.conn
        self.conn = None
        orig_isolation_level = conn.isolation_level
        conn.isolation_level = None
        conn.execute('BEGIN IMMEDIATE')
        try:
            yield True
        except (Exception, Timeout):
            pass
        try:
            conn.execute('ROLLBACK')
            conn.isolation_level = orig_isolation_level
            self.conn = conn
        except (Exception, Timeout):
            logging.exception(
                _('Broker error trying to rollback locked connection'))
            conn.close()

    def newid(self, remote_id):
        """
        Re-id the database.  This should be called after an rsync.

        :param remote_id: the ID of the remote database being rsynced in
        """
        with self.get() as conn:
            row = conn.execute('''
                UPDATE %s_stat SET id=?
            ''' % self.db_type, (str(uuid4()),))
            row = conn.execute('''
                SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1
            ''' % self.db_contains_type).fetchone()
            sync_point = row['ROWID'] if row else -1
            conn.execute('''
                INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)
                VALUES (?, ?)
            ''', (sync_point, remote_id))
            self._newid(conn)
            conn.commit()

    def _newid(self, conn):
        # Override for additional work when receiving an rsynced db.
        pass

    def _is_deleted(self, conn):
        """
        Check if the database is considered deleted

        :param conn: database conn
        :returns: True if the DB is considered to be deleted, False otherwise
        """
        raise NotImplementedError()

    def is_deleted(self):
        """
        Check if the DB is considered to be deleted.

        :returns: True if the DB is considered to be deleted, False otherwise
        """
        if self.db_file != ':memory:' and not os.path.exists(self.db_file):
            return True
        self._commit_puts_stale_ok()
        with self.get() as conn:
            return self._is_deleted(conn)

    def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):
        """
        Used in replication to handle updating timestamps.

        :param created_at: create timestamp
        :param put_timestamp: put timestamp
        :param delete_timestamp: delete timestamp
        """
        with self.get() as conn:
            old_status = self._is_deleted(conn)
            conn.execute('''
                UPDATE %s_stat SET created_at=MIN(?, created_at),
                                   put_timestamp=MAX(?, put_timestamp),
                                   delete_timestamp=MAX(?, delete_timestamp)
            ''' % self.db_type, (created_at, put_timestamp, delete_timestamp))
            if old_status != self._is_deleted(conn):
                timestamp = Timestamp(time.time())
                self._update_status_changed_at(conn, timestamp.internal)
            conn.commit()

    def get_items_since(self, start, count):
        """
        Get a list of objects in the database between start and end.

        :param start: start ROWID
        :param count: number to get
        :returns: list of objects between start and end
        """
        self._commit_puts_stale_ok()
        with self.get() as conn:
            curs = conn.execute('''
                SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?
            ''' % self.db_contains_type, (start, count))
            curs.row_factory = dict_factory
            return [r for r in curs]

    def get_sync(self, id, incoming=True):
        """
        Gets the most recent sync point for a server from the sync table.

        :param id: remote ID to get the sync_point for
        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        :returns: the sync point, or -1 if the id doesn't exist.
        """
        with self.get() as conn:
            row = conn.execute(
                "SELECT sync_point FROM %s_sync WHERE remote_id=?"
                % ('incoming' if incoming else 'outgoing'), (id,)).fetchone()
            if not row:
                return -1
            return row['sync_point']

    def get_syncs(self, incoming=True):
        """
        Get a serialized copy of the sync table.

        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        :returns: list of {'remote_id', 'sync_point'}
        """
        with self.get() as conn:
            curs = conn.execute('''
                SELECT remote_id, sync_point FROM %s_sync
            ''' % ('incoming' if incoming else 'outgoing'))
            result = []
            for row in curs:
                result.append({'remote_id': row[0], 'sync_point': row[1]})
            return result

    def get_max_row(self):
        query = '''
            SELECT SQLITE_SEQUENCE.seq
            FROM SQLITE_SEQUENCE
            WHERE SQLITE_SEQUENCE.name == '%s'
            LIMIT 1
        ''' % (self.db_contains_type)
        with self.get() as conn:
            row = conn.execute(query).fetchone()
        return row[0] if row else -1

    def get_replication_info(self):
        """
        Get information about the DB required for replication.

        :returns: dict containing keys from get_info plus max_row and metadata

        Note:: get_info's <db_contains_type>_count is translated to just
               "count" and metadata is the raw string.
        """
        info = self.get_info()
        info['count'] = info.pop('%s_count' % self.db_contains_type)
        info['metadata'] = self.get_raw_metadata()
        info['max_row'] = self.get_max_row()
        return info

    def get_info(self):
        self._commit_puts_stale_ok()
        with self.get() as conn:
            curs = conn.execute('SELECT * from %s_stat' % self.db_type)
            curs.row_factory = dict_factory
            return curs.fetchone()

    # Add a record to the database
    def put_record(self, record):
        if self.db_file == ':memory:':
            self.merge_items([record])
            return
        if not os.path.exists(self.db_file):
            raise DatabaseConnectionError(self.db_file, "DB doesn't exist")
        # Lock the parent directory of the database
        with lock_parent_directory(self.pending_file, self.pending_timeout):
            pending_size = 0
            try:
                pending_size = os.path.getsize(self.pending_file)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            if pending_size > PENDING_CAP:
                self._commit_puts([record])
            else:
                # Append the record to the .pending file
                with open(self.pending_file, 'a+b') as fp:
                    # Colons aren't used in base64 encoding; so they are
                    # our delimiter
                    fp.write(':')
                    fp.write(pickle.dumps(
                        self.make_tuple_for_pickle(record),
                        protocol=PICKLE_PROTOCOL).encode('base64'))
                    fp.flush()

    def _commit_puts(self, item_list=None):
        """
        Scan for .pending files and commit the found records by feeding them
        to merge_items().  Assume that lock_parent_directory has already
        been called.

        :param item_list: A list of items to commit in addition to .pending
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        if item_list is None:
            item_list = []
        self._preallocate()
        if not os.path.getsize(self.pending_file):
            if item_list:
                self.merge_items(item_list)
            return
        with open(self.pending_file, 'r+b') as fp:
            for entry in fp.read().split(':'):
                if entry:
                    try:
                        self._commit_puts_load(item_list, entry)
                    except Exception:
                        self.logger.exception(
                            _('Invalid pending entry %(file)s: %(entry)s'),
                            {'file': self.pending_file, 'entry': entry})
            if item_list:
                self.merge_items(item_list)
            try:
                os.ftruncate(fp.fileno(), 0)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise

    def _commit_puts_stale_ok(self):
        """
        Catch failures of _commit_puts() if broker is intended for reading
        of stats, and thus does not care for pending updates.
        """
        if self.db_file == ':memory:' or not os.path.exists(self.pending_file):
            return
        try:
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        except LockTimeout:
            if not self.stale_reads_ok:
                raise

    def _commit_puts_load(self, item_list, entry):
        """
        Unmarshall the :param:entry and append it to :param:item_list.
        This is implemented by a particular broker to be compatible
        with its :func:`merge_items`.
        """
        raise NotImplementedError

    def make_tuple_for_pickle(self, record):
        """
        Turn this db record dict into the format this service uses for
        pending pickles.
        """
        raise NotImplementedError

    def merge_syncs(self, sync_points, incoming=True):
        """
        Merge a list of sync points with the incoming sync table.

        :param sync_points: list of sync points where a sync point is a
                            dict of {'sync_point', 'remote_id'}
        :param incoming: if True, get the last incoming sync, otherwise get
                         the last outgoing sync
        """
        with self.get() as conn:
            for rec in sync_points:
                try:
                    conn.execute('''
                        INSERT INTO %s_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
                except sqlite3.IntegrityError:
                    conn.execute('''
                        UPDATE %s_sync SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''' % ('incoming' if incoming else 'outgoing'),
                        (rec['sync_point'], rec['remote_id']))
            conn.commit()

    def _preallocate(self):
        """
        The idea is to allocate space in front of an expanding db.  If it
        gets within 512k of a boundary, it allocates to the next boundary.
        Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after.
        """
        if not DB_PREALLOCATION or self.db_file == ':memory:':
            return
        MB = (1024 * 1024)

        def prealloc_points():
            for pm in (1, 2, 5, 10, 25, 50):
                yield pm * MB
            while True:
                pm += 50
                yield pm * MB

        stat = os.stat(self.db_file)
        file_size = stat.st_size
        allocated_size = stat.st_blocks * 512
        for point in prealloc_points():
            if file_size <= point - MB / 2:
                prealloc_size = point
                break
        if allocated_size < prealloc_size:
            with open(self.db_file, 'rb+') as fp:
                fallocate(fp.fileno(), int(prealloc_size))

    def get_raw_metadata(self):
        with self.get() as conn:
            try:
                metadata = conn.execute('SELECT metadata FROM %s_stat' %
                                        self.db_type).fetchone()[0]
            except sqlite3.OperationalError as err:
                if 'no such column: metadata' not in str(err):
                    raise
                metadata = ''
        return metadata

    @property
    def metadata(self):
        """
        Returns the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value.
        """
        metadata = self.get_raw_metadata()
        if metadata:
            metadata = json.loads(metadata)
            utf8encodekeys(metadata)
        else:
            metadata = {}
        return metadata

    @staticmethod
    def validate_metadata(metadata):
        """
        Validates that metadata falls within acceptable limits.

        :param metadata: to be validated
        :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE
                 is exceeded
        """
        meta_count = 0
        meta_size = 0
        for key, (value, timestamp) in metadata.items():
            key = key.lower()
            if value != '' and (key.startswith('x-account-meta') or
                                key.startswith('x-container-meta')):
                prefix = 'x-account-meta-'
                if key.startswith('x-container-meta-'):
                    prefix = 'x-container-meta-'
                key = key[len(prefix):]
                meta_count = meta_count + 1
                meta_size = meta_size + len(key) + len(value)
        if meta_count > MAX_META_COUNT:
            raise HTTPBadRequest('Too many metadata items; max %d'
                                 % MAX_META_COUNT)
        if meta_size > MAX_META_OVERALL_SIZE:
            raise HTTPBadRequest('Total metadata too large; max %d'
                                 % MAX_META_OVERALL_SIZE)

    def update_metadata(self, metadata_updates, validate_metadata=False):
        """
        Updates the metadata dict for the database. The metadata dict values
        are tuples of (value, timestamp) where the timestamp indicates when
        that key was set to that value. Key/values will only be overwritten
        if the timestamp is newer. To delete a key, set its value to ''. The
        timestamp will be used to determine the latest value to keep on
        replication.
        """
        old_metadata = self.metadata
        if set(metadata_updates).issubset(set(old_metadata)):
            for key, (value, timestamp) in metadata_updates.items():
                if timestamp > old_metadata[key][1]:
                    break
            else:
                return
        with self.get() as conn:
            try:
                md = conn.execute('SELECT metadata FROM %s_stat' %
                                  self.db_type).fetchone()[0]
                md = json.loads(md) if md else {}
                utf8encodekeys(md)
            except sqlite3.OperationalError as err:
                if 'no such column: metadata' not in str(err):
                    raise
                conn.execute("""
                    ALTER TABLE %s_stat
                    ADD COLUMN metadata TEXT DEFAULT ''
                """ % self.db_type)
                md = {}
            for key, value_timestamp in metadata_updates.items():
                value, timestamp = value_timestamp
                if key not in md or timestamp > md[key][1]:
                    md[key] = value_timestamp
            if validate_metadata:
                DatabaseBroker.validate_metadata(md)
            conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,
                         (json.dumps(md),))
            conn.commit()

    def reclaim(self, age_timestamp, sync_timestamp):
        """
        Delete rows from the db_contains_type table that are marked deleted
        and whose created_at timestamp is < age_timestamp.  Also deletes rows
        from incoming_sync and outgoing_sync where the updated_at timestamp
        is < sync_timestamp.

        In addition, this calls the DatabaseBroker's :func:`_reclaim` method.

        :param age_timestamp: max created_at timestamp of object rows to
                              delete
        :param sync_timestamp: max update_at timestamp of sync rows to delete
        """
        if self.db_file != ':memory:' and os.path.exists(self.pending_file):
            with lock_parent_directory(self.pending_file,
                                       self.pending_timeout):
                self._commit_puts()
        with self.get() as conn:
            conn.execute('''
                DELETE FROM %s WHERE deleted = 1 AND %s < ?
            ''' % (self.db_contains_type, self.db_reclaim_timestamp),
                (age_timestamp,))
            try:
                conn.execute('''
                    DELETE FROM outgoing_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
                conn.execute('''
                    DELETE FROM incoming_sync WHERE updated_at < ?
                ''', (sync_timestamp,))
            except sqlite3.OperationalError as err:
                # Old dbs didn't have updated_at in the _sync tables.
                if 'no such column: updated_at' not in str(err):
                    raise
            DatabaseBroker._reclaim(self, conn, age_timestamp)
            conn.commit()

    def _reclaim(self, conn, timestamp):
        """
        Removes any empty metadata values older than the timestamp using the
        given database connection. This function will not call commit on the
        conn, but will instead return True if the database needs committing.
        This function was created as a worker to limit transactions and
        commits from other related functions.

        :param conn: Database connection to reclaim metadata within.
        :param timestamp: Empty metadata items last updated before this
                          timestamp will be removed.
        :returns: True if conn.commit() should be called
        """
        try:
            md = conn.execute('SELECT metadata FROM %s_stat' %
                              self.db_type).fetchone()[0]
            if md:
                md = json.loads(md)
                keys_to_delete = []
                for key, (value, value_timestamp) in md.items():
                    if value == '' and value_timestamp < timestamp:
                        keys_to_delete.append(key)
                if keys_to_delete:
                    for key in keys_to_delete:
                        del md[key]
                    conn.execute('UPDATE %s_stat SET metadata = ?' %
                                 self.db_type, (json.dumps(md),))
                    return True
        except sqlite3.OperationalError as err:
            if 'no such column: metadata' not in str(err):
                raise
        return False

    def update_put_timestamp(self, timestamp):
        """
        Update the put_timestamp.  Only modifies it if it is greater than
        the current timestamp.

        :param timestamp: internalized put timestamp
        """
        with self.get() as conn:
            conn.execute(
                'UPDATE %s_stat SET put_timestamp = ?'
                ' WHERE put_timestamp < ?' % self.db_type,
                (timestamp, timestamp))
            conn.commit()

    def update_status_changed_at(self, timestamp):
        """
        Update the status_changed_at field in the stat table.  Only
        modifies status_changed_at if the timestamp is greater than the
        current status_changed_at timestamp.

        :param timestamp: internalized timestamp
        """
        with self.get() as conn:
            self._update_status_changed_at(conn, timestamp)
            conn.commit()

    def _update_status_changed_at(self, conn, timestamp):
        conn.execute(
            'UPDATE %s_stat SET status_changed_at = ?'
            ' WHERE status_changed_at < ?' % self.db_type,
            (timestamp, timestamp))
"protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): \"\"\" Scan for .pending files",
"import LockTimeout from swift.common.swob import HTTPBadRequest #: Whether calls will",
"conn class DatabaseBroker(object): \"\"\"Encapsulates working with a database.\"\"\" def __init__(self,",
"in \"\"\" with self.get() as conn: row = conn.execute(''' UPDATE",
"format this service uses for pending pickles. \"\"\" raise NotImplementedError",
"AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at =",
"to preallocate disk space for database files. DB_PREALLOCATION = False",
"uni_keys: sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def",
"last updated before this timestamp will be removed. :returns: True",
"database, timeout=None, *args, **kwargs): if timeout is None: timeout =",
"md = json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError",
"= time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path",
"put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp, delete_timestamp)) if",
"exc_hint = 'malformed' elif 'file is encrypted or is not",
"\"\"\" Scan for .pending files and commit the found records",
"''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync",
"The baseline implementation returns a full pathname to a database.",
"%s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) return True",
"key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size +",
"timestamp is < sync_timestamp. In addition, this calls the DatabaseBroker's",
"while True: try: return call() except sqlite3.OperationalError as e: if",
"\"\"\" The idea is to allocate space in front of",
"that value. \"\"\" metadata = self.get_raw_metadata() if metadata: metadata =",
"most recent sync point for a server from the sync",
"= sqlite3.Row conn.text_factory = str with closing(conn.cursor()) as cur: cur.execute('PRAGMA",
"It is ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp of",
"_ from tempfile import mkstemp from eventlet import sleep, Timeout",
"os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path,",
"locked connection')) conn.close() def newid(self, remote_id): \"\"\" Re-id the database.",
"method. :param age_timestamp: max created_at timestamp of object rows to",
"if value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if",
"stat table. Only modifies status_changed_at if the timestamp is greater",
"path def __str__(self): return 'DB %s already exists' % self.path",
"indicates a quarantine situation (malformed or corrupted database). If not,",
"updating timestamps. :param created_at: create timestamp :param put_timestamp: put timestamp",
"\"DB doesn't exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0",
"self.db_file != ':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout)",
"sync point is a dict of {'sync_point', 'remote_id'} :param incoming:",
"timeout=0) # creating dbs implicitly does a lot of transactions,",
"commit the found records by feeding them to merge_items(). Assume",
"to allocate space in front of an expanding db. If",
"vital for useful diagnostics. \"\"\" return self.db_file def initialize(self, put_timestamp=None,",
"values are tuples of (value, timestamp) where the timestamp indicates",
"FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError",
"sync table. :param id: remote ID to get the sync_point",
"updated_at TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert AFTER INSERT",
"get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn =",
"conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at),",
"if md else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if",
"of an expanding db. If it gets within 512k of",
"STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; \"\"\") if not",
"journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import traceback",
"within 512k of a boundary, it allocates to the next",
"= 2 #: Max number of pending entries PENDING_CAP =",
"self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly error messages for",
"eventlet import sleep, Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT,",
"container databases is XORed by the 128-bit hash on insert",
"only be overwritten if the timestamp is newer. To delete",
"self.db_type).fetchone()[0] md = json.loads(md) if md else {} utf8encodekeys(md) except",
"try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if",
"SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type)",
"# Override for additional work when receiving an rsynced db.",
"or delete. This serves as a rolling, order-independent hash of",
"when you're going to serialize the results. \"\"\" return dict(",
"'outgoing'), (id,)).fetchone() if not row: return -1 return row['sync_point'] def",
"the License. \"\"\" Database code for Swift \"\"\" from contextlib",
"with LockTimeout(timeout, db_file): retry_wait = 0.001 while True: try: return",
"with its :func:`merge_items`. \"\"\" raise NotImplementedError def make_tuple_for_pickle(self, record): \"\"\"",
"name, timestamp): \"\"\" Each entry in the account and container",
"keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata = ?' %",
"(the \"License\"); # you may not use this file except",
"append it to :param:item_list. This is implemented by a particular",
"NotImplementedError def make_tuple_for_pickle(self, record): \"\"\" Turn this db record dict",
"metadata.items(): key = key.lower() if value != '' and (key.startswith('x-account-meta')",
"replication to handle updating timestamps. :param created_at: create timestamp :param",
"\"\"\"More friendly error messages for DB Errors.\"\"\" def __init__(self, path,",
"key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata =",
"sync point for a server from the sync table. :param",
"containing keys from get_info plus max_row and metadata Note:: get_info's",
"If so, the database will be quarantined and a new",
"#从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key,",
"sync_points: list of sync points where a sync point is",
"if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with",
"rsynced in \"\"\" with self.get() as conn: row = conn.execute('''",
"try: yield conn conn.rollback() self.conn = conn except sqlite3.DatabaseError: try:",
"os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute('''",
"execute(self, *args, **kwargs): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self,",
"conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout):",
"to %s database') % \\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise",
"or container being inserted :param timestamp: internalized timestamp of the",
"3, chexor) conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(\"\"\" CREATE",
"rows to delete :param sync_timestamp: max update_at timestamp of sync",
"column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit()",
"# # Unless required by applicable law or agreed to",
"error' in str(exc_value): exc_hint = 'disk error while accessing' else:",
"self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def",
"in str(err): raise return False def update_put_timestamp(self, timestamp): \"\"\" Update",
"renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn = conn",
"if row else -1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync",
"except OSError as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):",
"= self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata",
"conn.close() raise @contextmanager def lock(self): \"\"\"Use with the \"with\" statement;",
"BROKER_TIMEOUT self.timeout = timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0,",
"= logger or logging.getLogger() self.account = account self.container = container",
"transactions and commits from other related functions. :param conn: Database",
"eventlet.\"\"\" def __init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file =",
"# Old dbs didn't have updated_at in the _sync tables.",
"get the last outgoing sync :returns: list of {'remote_id', 'sync_point'}",
"db_file self.pending_file = self.db_file + '.pending' self.pending_timeout = pending_timeout or",
"key, (value, timestamp) in metadata.items(): key = key.lower() if value",
"ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER UPDATE ON",
"> ? ORDER BY ROWID ASC LIMIT ? ''' %",
"incoming else 'outgoing'), (id,)).fetchone() if not row: return -1 return",
"sync, otherwise get the last outgoing sync \"\"\" with self.get()",
"trying to connect to a DB BROKER_TIMEOUT = 25 #:",
"OSError as err: if err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self):",
"otherwise get the last outgoing sync \"\"\" with self.get() as",
"ID of the remote database being rsynced in \"\"\" with",
"(?, ?) ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'],",
"'%s' LIMIT 1 ''' % (self.db_contains_type) with self.get() as conn:",
"= conn.execute( \"SELECT sync_point FROM %s_sync WHERE remote_id=?\" % ('incoming'",
"only be used when you need a real dict, i.e.",
"'locked' not in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait *",
"TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET",
"conn.commit() def _newid(self, conn): # Override for additional work when",
"os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as if",
"implied. # See the License for the specific language governing",
"':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as",
"considered to be deleted, False otherwise \"\"\" raise NotImplementedError() def",
"def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns a properly configured SQLite",
"not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = \"%s-%s\" % (quar_path,",
"hash on insert or delete. This serves as a rolling,",
"\"\"\" Check if the DB is considered to be deleted.",
"UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s',",
"def get_raw_metadata(self): with self.get() as conn: try: metadata = conn.execute('SELECT",
"= self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata()",
"metadata_updates, validate_metadata=False): \"\"\" Updates the metadata dict for the database.",
"conn :returns: True if the DB is considered to be",
"else: raise exc_type, exc_value, exc_traceback prefix_path = os.path.dirname(self.db_dir) partition_path =",
"'UPDATE %s_stat SET put_timestamp = ?' ' WHERE put_timestamp <",
"to a human. The baseline implementation returns a full pathname",
"DB is considered to be deleted, False otherwise \"\"\" raise",
"the entity under broker to a human. The baseline implementation",
"timestamp \"\"\" with self.get() as conn: conn.execute( 'UPDATE %s_stat SET",
"uni_keys = [k for k in metadata if isinstance(k, unicode)]",
"exists' % self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection handler that",
"(sync_point, remote_id) VALUES (?, ?) ''' % ('incoming' if incoming",
"< ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at",
"'' return metadata @property def metadata(self): \"\"\" Returns the metadata",
"table that are marked deleted and whose created_at timestamp is",
"check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly does a lot",
"the exception info to see if it indicates a quarantine",
"updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE",
"point for a server from the sync table. :param id:",
"incoming sync, otherwise get the last outgoing sync :returns: the",
"modifies status_changed_at if the timestamp is greater than the current",
"?' % self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): \"\"\"",
"transactions, so we # pick fast, unsafe options here and",
"def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return if",
"(sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ? ''',",
":param path: path to DB :param timeout: timeout for connection",
"0 meta_size = 0 for key, (value, timestamp) in metadata.items():",
"as conn: curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync",
"internalized timestamp of the new record :returns: a hex representation",
"is not a database' in str(exc_value): exc_hint = 'corrupted' elif",
"age_timestamp. Also deletes rows from incoming_sync and outgoing_sync where the",
"self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger",
"= 0 try: pending_size = os.path.getsize(self.pending_file) except OSError as err:",
"new.ROWID; END; \"\"\") if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn,",
"try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else:",
"conn: curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync '''",
"else s) for s in args] def utf8encodekeys(metadata): uni_keys =",
"info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with self.get()",
"to connect to a DB BROKER_TIMEOUT = 25 #: Pickle",
"metadata' not in str(err): raise metadata = '' return metadata",
"def possibly_quarantine(self, exc_type, exc_value, exc_traceback): \"\"\" Checks the exception info",
"database' in str(exc_value): exc_hint = 'corrupted' elif 'disk I/O error'",
"friendly error messages for DB Errors.\"\"\" def __init__(self, path, msg,",
"diagnostics. \"\"\" return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create",
"row = conn.execute(query).fetchone() return row[0] if row else -1 def",
"metadata(self): \"\"\" Returns the metadata dict for the database. The",
"conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(\"\"\" CREATE TABLE outgoing_sync",
"\\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self):",
"Unless required by applicable law or agreed to in writing,",
"return dict( ((col[0], row[idx]) for idx, col in enumerate(crs.description))) def",
"else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn =",
"merge_syncs(self, sync_points, incoming=True): \"\"\" Merge a list of sync points",
"malformed' in str(exc_value): exc_hint = 'malformed' elif 'file is encrypted",
"pm in (1, 2, 5, 10, 25, 50): yield pm",
"True, create the DB if it doesn't exist :returns: DB",
"REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''', (sync_point,",
"compatible with its :func:`merge_items`. \"\"\" raise NotImplementedError def make_tuple_for_pickle(self, record):",
"tables. if 'no such column: updated_at' not in str(err): raise",
"(%s, %s):\\n%s' % ( self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More",
"the subclass's ``_initialize`` method. It is ignored by ``AccountBroker``. :param",
"recent sync point for a server from the sync table.",
"Get information about the DB required for replication. :returns: dict",
"key was set to that value. \"\"\" metadata = self.get_raw_metadata()",
"function was created as a worker to limit transactions and",
"''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except",
"VALUES (?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self,",
"try: pending_size = os.path.getsize(self.pending_file) except OSError as err: if err.errno",
"for key, (value, value_timestamp) in md.items(): if value == ''",
"to serialize the results. \"\"\" return dict( ((col[0], row[idx]) for",
"the specific language governing permissions and # limitations under the",
"in str(exc_value): exc_hint = 'disk error while accessing' else: raise",
"name of the object or container being inserted :param timestamp:",
"serves as a rolling, order-independent hash of the contents. (check",
"Pickle protocol to use PICKLE_PROTOCOL = 2 #: Max number",
"Validates that metadata_falls within acceptable limits. :param metadata: to be",
"= {} return metadata @staticmethod def validate_metadata(metadata): \"\"\" Validates that",
"% (int(old, 16) ^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False):",
"sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class",
"**kwargs) def execute(self, *args, **kwargs): return _db_timeout( self.timeout, self.db_file, lambda:",
"+ XOR) :param old: hex representation of the current DB",
"and thus does not care for pending updates. \"\"\" if",
"Checks the exception info to see if it indicates a",
"using the given database connection. This function will not call",
"_commit_puts_load(self, item_list, entry): \"\"\" Unmarshall the :param:entry and append it",
"(sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs didn't have",
"err: if err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self): \"\"\" Catch",
"open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'): if entry:",
"= STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; \"\"\") if",
"conn.execute( \"SELECT sync_point FROM %s_sync WHERE remote_id=?\" % ('incoming' if",
"for DB Errors.\"\"\" def __init__(self, path): self.path = path def",
"its :func:`merge_items`. \"\"\" raise NotImplementedError def make_tuple_for_pickle(self, record): \"\"\" Turn",
"to :param:item_list. This is implemented by a particular broker to",
"incoming=True): \"\"\" Merge a list of sync points with the",
"was a \"condition\" where different parts # of the system",
"self.merge_items([record]) return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\")",
"old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value,",
"\"\"\" Mark the DB as deleted :param timestamp: internalized delete",
"not os.path.exists(self.pending_file): return if item_list is None: item_list = []",
"delete \"\"\" if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file,",
"INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''', (sync_point, remote_id))",
"\"\"\" meta_count = 0 meta_size = 0 for key, (value,",
"meta_size = meta_size + len(key) + len(value) if meta_count >",
"is the raw string. \"\"\" info = self.get_info() info['count'] =",
"allocate space in front of an expanding db. If it",
"md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md:",
"self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): \"\"\" Delete rows",
"Only modifies it if it is greater than the current",
"raise conn.execute(\"\"\" ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT",
"row): \"\"\" This should only be used when you need",
"additional work when receiving an rsynced db. pass def _is_deleted(self,",
"import sleep, Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE",
"the remote database being rsynced in \"\"\" with self.get() as",
"conn.execute('BEGIN IMMEDIATE') try: yield True except (Exception, Timeout): pass try:",
"if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE",
"mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) #",
"!= ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get()",
"?' ' WHERE status_changed_at < ?' % self.db_type, (timestamp, timestamp))",
"logging import os from uuid import uuid4 import sys import",
"not okay_to_create: # attempt to detect and fail when connect",
"conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): \"\"\" Removes any",
"else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp: # Colons aren't",
"considered to be deleted, False otherwise \"\"\" if self.db_file !=",
"\"\"\" with self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp",
"Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils",
"connection. :param path: path to DB :param timeout: timeout for",
"pick fast, unsafe options here and do a big fsync",
"self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if item_list is",
"HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) if meta_size",
"conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try: yield True except",
"put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close()",
"if the id doesn't exist. \"\"\" with self.get() as conn:",
"cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3,",
"These empty keys will eventually be removed by :func:`reclaim` \"\"\"",
"db_file, call): with LockTimeout(timeout, db_file): retry_wait = 0.001 while True:",
"conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(\"\"\"",
"= new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync",
"True if the DB is considered to be deleted, False",
"or MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count = 0 meta_size =",
"%s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' %",
"= path def __str__(self): return 'DB %s already exists' %",
"point is a dict of {'sync_point', 'remote_id'} :param incoming: if",
"':memory:': return MB = (1024 * 1024) def prealloc_points(): for",
"connect creates the db file stat = os.stat(path) if stat.st_size",
"e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path =",
"(self.db_contains_type) with self.get() as conn: row = conn.execute(query).fetchone() return row[0]",
"point break if allocated_size < prealloc_size: with open(self.db_file, 'rb+') as",
"deleted = 1 AND %s < ? ''' % (self.db_contains_type,",
"not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self,",
"mark the db as deleted with self.get() as conn: self._delete_db(conn,",
"self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args, **kwargs):",
"= point break if allocated_size < prealloc_size: with open(self.db_file, 'rb+')",
"conn.text_factory = str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous =",
"self.conn = None self.db_file = db_file self.pending_file = self.db_file +",
"= MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory",
"return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor",
"import contextmanager, closing import hashlib import logging import os from",
"OSError as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise",
"the ID of the remote database being rsynced in \"\"\"",
"timestamp = value_timestamp if key not in md or timestamp",
"timestamp is newer. To delete a key, set its value",
"the given database connection. This function will not call commit",
"args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args,",
"AFTER INSERT ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at =",
"isinstance(s, unicode) else s) for s in args] def utf8encodekeys(metadata):",
"self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at,",
"conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,))",
"quar_path, fsync=False) except OSError as e: if e.errno not in",
"to use PICKLE_PROTOCOL = 2 #: Max number of pending",
"ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT '' \"\"\"",
"conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:'",
"doesn't exist. \"\"\" with self.get() as conn: row = conn.execute(",
"conn = self.conn self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level",
"conn.execute(''' SELECT * FROM %s WHERE ROWID > ? ORDER",
"the new hash value \"\"\" if name is None: raise",
"between start and end \"\"\" self._commit_puts_stale_ok() with self.get() as conn:",
"= ?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as",
"of transactions, so we # pick fast, unsafe options here",
"each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout)",
"Catch failures of _commit_puts() if broker is intended for reading",
"FROM %s WHERE ROWID > ? ORDER BY ROWID ASC",
"% self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such column:",
"metadata values older than the timestamp using the given database",
"\"\"\" Returns a properly configured SQLite database connection. :param path:",
"conn: curs = conn.execute(''' SELECT * FROM %s WHERE ROWID",
"%s already exists' % self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection",
"with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store",
"logging.getLogger() self.account = account self.container = container self._db_version = -1",
"curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' %",
"set to that value. \"\"\" metadata = self.get_raw_metadata() if metadata:",
"for idx, col in enumerate(crs.description))) def chexor(old, name, timestamp): \"\"\"",
"pending entries PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8') if",
"in replication to handle updating timestamps. :param created_at: create timestamp",
"as pickle from swift import gettext_ as _ from tempfile",
"fsync=False) except OSError as e: if e.errno not in (errno.EEXIST,",
"Whether calls will be made to preallocate disk space for",
"False otherwise \"\"\" raise NotImplementedError() def is_deleted(self): \"\"\" Check if",
":param:entry and append it to :param:item_list. This is implemented by",
"update_put_timestamp(self, timestamp): \"\"\" Update the put_timestamp. Only modifies it if",
"under the License. \"\"\" Database code for Swift \"\"\" from",
"fallocate from swift.common.exceptions import LockTimeout from swift.common.swob import HTTPBadRequest #:",
"with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file)",
"sleep, Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from",
"self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs)) def dict_factory(crs, row):",
"need a real dict, i.e. when you're going to serialize",
"50m, then every 50m after. \"\"\" if not DB_PREALLOCATION or",
"for key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]:",
"a database.\"\"\" if not self.conn: if self.db_file != ':memory:' and",
"self.get() as conn: row = conn.execute(''' UPDATE %s_stat SET id=?",
"detail = _('Quarantined %s to %s due to %s database')",
"\"\"\"Use with the \"with\" statement; returns a database connection.\"\"\" if",
"put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file:",
"\"\"\" Re-id the database. This should be called after an",
"row[0], 'sync_point': row[1]}) return result def get_max_row(self): query = '''",
"created_at timestamp is < age_timestamp. Also deletes rows from incoming_sync",
"conn: conn.execute( 'UPDATE %s_stat SET put_timestamp = ?' ' WHERE",
"for connection :param okay_to_create: if True, create the DB if",
"You may obtain a copy of the License at #",
"put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return if not",
"a human. The baseline implementation returns a full pathname to",
"item_list=None): \"\"\" Scan for .pending files and commit the found",
"if True, get the last incoming sync, otherwise get the",
"incoming sync, otherwise get the last outgoing sync :returns: list",
"\"\"\" if not DB_PREALLOCATION or self.db_file == ':memory:': return MB",
"DELETE FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute('''",
"Swift \"\"\" from contextlib import contextmanager, closing import hashlib import",
"sync points where a sync point is a dict of",
"for additional work when receiving an rsynced db. pass def",
"db_file): retry_wait = 0.001 while True: try: return call() except",
"addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max",
"= os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path,",
"PUT request :param storage_policy_index: only required for containers \"\"\" if",
"cls=None): if cls is None: cls = GreenDBCursor return sqlite3.Connection.cursor(self,",
"the db as deleted with self.get() as conn: self._delete_db(conn, timestamp)",
"timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with a",
"str(e): raise sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05) class",
"= get_db_connection(self.db_file, self.timeout) else: self.conn = conn def delete_db(self, timestamp):",
"conn.execute(query).fetchone() return row[0] if row else -1 def get_replication_info(self): \"\"\"",
"locks a database.\"\"\" if not self.conn: if self.db_file != ':memory:'",
"utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such column: metadata'",
"import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object):",
"created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp,",
"cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode",
"'disk error while accessing' else: raise exc_type, exc_value, exc_traceback prefix_path",
"conn.commit() def get_items_since(self, start, count): \"\"\" Get a list of",
"sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self))",
"md else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no",
"16) ^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns",
"self.get() as conn: curs = conn.execute('SELECT * from %s_stat' %",
"be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded",
"pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn except",
"for point in prealloc_points(): if file_size <= point - MB",
"you need a real dict, i.e. when you're going to",
"= self.db_file + '.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"put timestamp \"\"\" with self.get() as conn: conn.execute( 'UPDATE %s_stat",
"pathname to a database. This is vital for useful diagnostics.",
"def _is_deleted(self, conn): \"\"\" Check if the database is considered",
"the last outgoing sync \"\"\" with self.get() as conn: for",
"(age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ?",
"self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete = [] for",
"Turn this db record dict into the format this service",
"sqlite3.DatabaseError will be raised indicating the action taken. \"\"\" if",
"item_list: A list of items to commit in addition to",
"the metadata cleared_meta = {} for k in self.metadata: cleared_meta[k]",
"CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync",
"open(self.pending_file, 'a+b') as fp: # Colons aren't used in base64",
"fsync=False) detail = _('Quarantined %s to %s due to %s",
"''' % self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r",
"commit in addition to .pending \"\"\" if self.db_file == ':memory:'",
"conn, timestamp): \"\"\" Removes any empty metadata values older than",
"conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\" Used",
"hash of the contents. (check + XOR) :param old: hex",
"self.db_file == ':memory:': return MB = (1024 * 1024) def",
"quar_path, fsync=False) detail = _('Quarantined %s to %s due to",
"connection error (%s, %s):\\n%s' % ( self.path, self.timeout, self.msg) class",
"sync :returns: list of {'remote_id', 'sync_point'} \"\"\" with self.get() as",
"delete a key, set its value to ('', timestamp). These",
"= mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0)",
":param created_at: create timestamp :param put_timestamp: put timestamp :param delete_timestamp:",
"import json, Timestamp, renamer, \\ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions",
"gets within 512k of a boundary, it allocates to the",
"tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file,",
"doesn't exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try:",
"when receiving an rsynced db. pass def _is_deleted(self, conn): \"\"\"",
"dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating",
"\"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if",
"timestamp) in metadata.items(): key = key.lower() if value != ''",
"rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\" The idea is to allocate",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert AFTER",
"hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old, 16) ^",
"metadata dict for the database. The metadata dict values are",
"it if it is greater than the current timestamp. :param",
"True except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level",
"License. # You may obtain a copy of the License",
"\"\"\" Validates that metadata_falls within acceptable limits. :param metadata: to",
"greater than the current status_changed_at timestamp. :param timestamp: internalized timestamp",
"subclass's ``_initialize`` method. It is ignored by ``AccountBroker``. :param put_timestamp:",
"a database.\"\"\" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None,",
"in str(exc_value): exc_hint = 'corrupted' elif 'disk I/O error' in",
"updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def",
"dict( ((col[0], row[idx]) for idx, col in enumerate(crs.description))) def chexor(old,",
"in curs] def get_sync(self, id, incoming=True): \"\"\" Gets the most",
"= json.loads(md) keys_to_delete = [] for key, (value, value_timestamp) in",
"IMMEDIATE') try: yield True except (Exception, Timeout): pass try: conn.execute('ROLLBACK')",
"del md[key] conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type,",
"timestamp: Empty metadata items last updated before this timestamp will",
"return #到这里,就是存在需要更新的元数据 with self.get() as conn: try: md = conn.execute('SELECT",
"WHERE updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err:",
"uuid4 import sys import time import errno import six.moves.cPickle as",
"a properly configured SQLite database connection. :param path: path to",
"END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE",
"not, the original exception will be reraised. If so, the",
"as deleted with self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def",
"':memory:' and not okay_to_create: # attempt to detect and fail",
"keys_to_delete = [] for key, (value, value_timestamp) in md.items(): if",
"stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger = logger",
"cleared_meta = {} for k in self.metadata: cleared_meta[k] = ('',",
"\"\"\" raise NotImplementedError def make_tuple_for_pickle(self, record): \"\"\" Turn this db",
"outgoing_sync where the updated_at timestamp is < sync_timestamp. In addition,",
"%s_stat SET put_timestamp = ?' ' WHERE put_timestamp < ?'",
"have updated_at in the _sync tables. if 'no such column:",
"the DB is considered to be deleted, False otherwise \"\"\"",
"corrupted database). If not, the original exception will be reraised.",
":param timeout: timeout for connection :param okay_to_create: if True, create",
"\"\"\" Get information about the DB required for replication. :returns:",
"\"\"\" metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata)",
"sqlite3.DatabaseError: try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout):",
"sync_point FROM %s_sync WHERE remote_id=?\" % ('incoming' if incoming else",
"row: return -1 return row['sync_point'] def get_syncs(self, incoming=True): \"\"\" Get",
"None: cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self): return",
"commit on the conn, but will instead return True if",
":param name: name of the object or container being inserted",
"should only be used when you need a real dict,",
"as conn: row = conn.execute( \"SELECT sync_point FROM %s_sync WHERE",
"to delete \"\"\" if self.db_file != ':memory:' and os.path.exists(self.pending_file): with",
"? ORDER BY ROWID ASC LIMIT ? ''' % self.db_contains_type,",
"':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts()",
":param:item_list. This is implemented by a particular broker to be",
"try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout)",
"json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod def",
"reclaim(self, age_timestamp, sync_timestamp): \"\"\" Delete rows from the db_contains_type table",
"delete_timestamp: delete timestamp \"\"\" with self.get() as conn: old_status =",
"detect and fail when connect creates the db file stat",
"with the incoming sync table. :param sync_points: list of sync",
"be compatible with its :func:`merge_items`. \"\"\" raise NotImplementedError def make_tuple_for_pickle(self,",
"outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at",
"= str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL')",
"conn.commit() def _preallocate(self): \"\"\" The idea is to allocate space",
"WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER UPDATE",
"return True except sqlite3.OperationalError as err: if 'no such column:",
"self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for entry in",
"time import errno import six.moves.cPickle as pickle from swift import",
"contents. (check + XOR) :param old: hex representation of the",
"= conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try: yield True",
"database connection. This function will not call commit on the",
"and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'): prefix",
"= dict_factory return [r for r in curs] def get_sync(self,",
"To delete a key, set its value to ('', timestamp).",
"TABLE incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT",
"list of {'remote_id', 'sync_point'} \"\"\" with self.get() as conn: curs",
"+ 1 meta_size = meta_size + len(key) + len(value) if",
"= ?' ' WHERE status_changed_at < ?' % self.db_type, (timestamp,",
"= conn.execute(''' UPDATE %s_stat SET id=? ''' % self.db_type, (str(uuid4()),))",
"conn, but will instead return True if the database needs",
"= meta_count + 1 meta_size = meta_size + len(key) +",
"% self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): \"\"\" Update",
"between start and end. :param start: start ROWID :param count:",
"#: Max number of pending entries PENDING_CAP = 131072 def",
"self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry})",
"order-independent hash of the contents. (check + XOR) :param old:",
"Max number of pending entries PENDING_CAP = 131072 def utf8encode(*args):",
"min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly error messages",
"the object or container being inserted :param timestamp: internalized timestamp",
"= hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old, 16)",
"A list of items to commit in addition to .pending",
"path, msg, timeout=0): self.path = path self.timeout = timeout self.msg",
"in the account and container databases is XORed by the",
"that lock_parent_directory has already been called. :param item_list: A list",
"= 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count + 1",
"def __init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file",
"updated before this timestamp will be removed. :returns: True if",
"str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp):",
"sync_point for :param incoming: if True, get the last incoming",
"return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create the DB",
"returns a database connection.\"\"\" if not self.conn: if self.db_file !=",
"Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager def",
"chexor) except sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return",
"\"\"\" Get a serialized copy of the sync table. :param",
"%s_stat ADD COLUMN metadata TEXT DEFAULT '' \"\"\" % self.db_type)",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"md or timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata:",
"db file stat = os.stat(path) if stat.st_size == 0 and",
"self.conn self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level = None",
"boundary. Boundaries are 2m, 5m, 10m, 25m, 50m, then every",
"sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait =",
"cursor(self, cls=None): if cls is None: cls = GreenDBCursor return",
"= new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync",
"(json.dumps(md),)) return True except sqlite3.OperationalError as err: if 'no such",
"synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode =",
"required by applicable law or agreed to in writing, software",
"else: self.conn = conn def delete_db(self, timestamp): \"\"\" Mark the",
"lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from swift.common.swob import HTTPBadRequest",
"mkstemp from eventlet import sleep, Timeout import sqlite3 from swift.common.constraints",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"factory=GreenDBConnection, timeout=0) # creating dbs implicitly does a lot of",
"timestamp \"\"\" with self.get() as conn: old_status = self._is_deleted(conn) conn.execute('''",
"large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\"",
"timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET",
"?' % self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): \"\"\"",
"of the contents. (check + XOR) :param old: hex representation",
"otherwise get the last outgoing sync :returns: the sync point,",
"= meta_size + len(key) + len(value) if meta_count > MAX_META_COUNT:",
"exception info to see if it indicates a quarantine situation",
"of objects in the database between start and end. :param",
"TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE",
"> old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as",
"\"DB doesn't exist\") conn = self.conn self.conn = None try:",
"than the current timestamp. :param timestamp: internalized put timestamp \"\"\"",
"''' % ('incoming' if incoming else 'outgoing')) result = []",
"agreed to in writing, software # distributed under the License",
"item_list, entry): \"\"\" Unmarshall the :param:entry and append it to",
"database.\"\"\" self.conn = None self.db_file = db_file self.pending_file = self.db_file",
"self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err: if 'no",
"MB / 2: prealloc_size = point break if allocated_size <",
"distributed under the License is distributed on an \"AS IS\"",
"if isinstance(k, unicode)] for k in uni_keys: sv = metadata[k]",
"def _commit_puts(self, item_list=None): \"\"\" Scan for .pending files and commit",
"point - MB / 2: prealloc_size = point break if",
"timestamp is < age_timestamp. Also deletes rows from incoming_sync and",
"The storage_policy_index is passed through to the subclass's ``_initialize`` method.",
"merge_items(). Assume that lock_parent_directory has already been called. :param item_list:",
"try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?)",
"if 'database disk image is malformed' in str(exc_value): exc_hint =",
"self.pending_timeout): if os.path.exists(self.db_file): # It's as if there was a",
"Database code for Swift \"\"\" from contextlib import contextmanager, closing",
"self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn = conn def delete_db(self,",
"except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE remote_id=?",
"( self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly error messages",
"and # limitations under the License. \"\"\" Database code for",
"timestamp) where the timestamp indicates when that key was set",
"= str conn.executescript(\"\"\" CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE,",
"self.conn = conn def delete_db(self, timestamp): \"\"\" Mark the DB",
"== ':memory:': tmp_db_file = None conn = get_db_connection(self.db_file, self.timeout) else:",
"its value to ('', timestamp). These empty keys will eventually",
"empty keys will eventually be removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata",
"updates. \"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return",
"it is greater than the current timestamp. :param timestamp: internalized",
"0.001 while True: try: return call() except sqlite3.OperationalError as e:",
":param timestamp: internalized timestamp of the new record :returns: a",
"# then mark the db as deleted with self.get() as",
"(value, timestamp) where the timestamp indicates when that key was",
"in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05)",
"Override for additional work when receiving an rsynced db. pass",
"if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = \"%s-%s\"",
"older than the timestamp using the given database connection. This",
"< timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del",
"db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with",
"exceeded \"\"\" meta_count = 0 meta_size = 0 for key,",
"conn.close() def newid(self, remote_id): \"\"\" Re-id the database. This should",
"'DB connection error (%s, %s):\\n%s' % ( self.path, self.timeout, self.msg)",
"an rsync. :param remote_id: the ID of the remote database",
"curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def get_max_row(self): query",
"database. This is vital for useful diagnostics. \"\"\" return self.db_file",
"else 'outgoing'), (id,)).fetchone() if not row: return -1 return row['sync_point']",
"to .pending \"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file):",
"remote_id, sync_point FROM %s_sync ''' % ('incoming' if incoming else",
"the sync_point for :param incoming: if True, get the last",
"\"\"\" Returns a string identifying the entity under broker to",
"not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as",
"conn conn.rollback() self.conn = conn except sqlite3.DatabaseError: try: conn.close() except",
"self._newid(conn) conn.commit() def _newid(self, conn): # Override for additional work",
"as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback):",
"will be quarantined and a new sqlite3.DatabaseError will be raised",
"sync_point) WHERE remote_id=? ''' % ('incoming' if incoming else 'outgoing'),",
"ADD COLUMN metadata TEXT DEFAULT '' \"\"\" % self.db_type) md",
"END; \"\"\") if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp,",
"name: name of the object or container being inserted :param",
"self.db_file = db_file self.pending_file = self.db_file + '.pending' self.pending_timeout =",
"CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync",
"metadata dict values are tuples of (value, timestamp) where the",
"Only modifies status_changed_at if the timestamp is greater than the",
"sqlite3.Cursor.execute( self, *args, **kwargs)) def dict_factory(crs, row): \"\"\" This should",
"This function was created as a worker to limit transactions",
"the DB as deleted :param timestamp: internalized delete timestamp \"\"\"",
"sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not",
"#: Timeout for trying to connect to a DB BROKER_TIMEOUT",
"def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at =",
"reraised. If so, the database will be quarantined and a",
"= sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait",
"timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def",
"then every 50m after. \"\"\" if not DB_PREALLOCATION or self.db_file",
"WHERE remote_id=?\" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone() if",
"value_timestamp) in md.items(): if value == '' and value_timestamp <",
"try: metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except",
"ORDER BY ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point",
"<= point - MB / 2: prealloc_size = point break",
"and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn:",
"in md or timestamp > md[key][1]: md[key] = value_timestamp if",
"= os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined', self.db_type",
"related functions. :param conn: Database connection to reclaim metadata within.",
"identifying the entity under broker to a human. The baseline",
"DatabaseConnectionError(path, 'DB file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory",
"self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\" Used in replication",
"the metadata dict for the database. The metadata dict values",
"in self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then mark",
"'database disk image is malformed' in str(exc_value): exc_hint = 'malformed'",
":returns: list of objects between start and end \"\"\" self._commit_puts_stale_ok()",
"HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self,",
"created_at timestamp of object rows to delete :param sync_timestamp: max",
"for k in uni_keys: sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')]",
"self.db_file + '.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok =",
"service uses for pending pickles. \"\"\" raise NotImplementedError def merge_syncs(self,",
"made to preallocate disk space for database files. DB_PREALLOCATION =",
"are tuples of (value, timestamp) where the timestamp indicates when",
"that metadata_falls within acceptable limits. :param metadata: to be validated",
"remote ID to get the sync_point for :param incoming: if",
"COLUMN metadata TEXT DEFAULT '' \"\"\" % self.db_type) md =",
"key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if key",
"does not care for pending updates. \"\"\" if self.db_file ==",
"new hash value \"\"\" if name is None: raise Exception('name",
"' WHERE put_timestamp < ?' % self.db_type, (timestamp, timestamp)) conn.commit()",
"incoming: if True, get the last incoming sync, otherwise get",
"the License is distributed on an \"AS IS\" BASIS, #",
"from swift.common.utils import json, Timestamp, renamer, \\ mkdirs, lock_parent_directory, fallocate",
"This is implemented by a particular broker to be compatible",
"= conn.execute(''' SELECT ROWID FROM %s ORDER BY ROWID DESC",
"__init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working",
"is_deleted(self): \"\"\" Check if the DB is considered to be",
"raise exc_type, exc_value, exc_traceback prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path)",
"def validate_metadata(metadata): \"\"\" Validates that metadata_falls within acceptable limits. :param",
"swift.common.utils import json, Timestamp, renamer, \\ mkdirs, lock_parent_directory, fallocate from",
"10 self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout",
"[] for key, (value, value_timestamp) in md.items(): if value ==",
"database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def cursor(self, cls=None): if",
"real dict, i.e. when you're going to serialize the results.",
"metadata cleared_meta = {} for k in self.metadata: cleared_meta[k] =",
"return -1 return row['sync_point'] def get_syncs(self, incoming=True): \"\"\" Get a",
"sync table. :param sync_points: list of sync points where a",
"remote_id=?\" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone() if not",
"uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined %s to %s",
"it allocates to the next boundary. Boundaries are 2m, 5m,",
"entries PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8') if isinstance(s,",
"exist\") conn = self.conn self.conn = None orig_isolation_level = conn.isolation_level",
"a serialized copy of the sync table. :param incoming: if",
"meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d'",
"info to see if it indicates a quarantine situation (malformed",
"it gets within 512k of a boundary, it allocates to",
"law or agreed to in writing, software # distributed under",
"(errno.EEXIST, errno.ENOTEMPTY): raise quar_path = \"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir,",
"intended for reading of stats, and thus does not care",
"pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager def lock(self):",
"self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler that",
"\"\"\" raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): \"\"\" Merge a",
"prealloc_points(): for pm in (1, 2, 5, 10, 25, 50):",
":param timestamp: internalized put timestamp \"\"\" with self.get() as conn:",
"# delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None):",
"that plays well with eventlet.\"\"\" def __init__(self, database, timeout=None, *args,",
"% self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection handler that plays",
"and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError):",
"get_raw_metadata(self): with self.get() as conn: try: metadata = conn.execute('SELECT metadata",
"raise quar_path = \"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False)",
"as conn: curs = conn.execute('SELECT * from %s_stat' % self.db_type)",
"key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break",
"in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return",
"self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items():",
"('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute('''",
"the original exception will be reraised. If so, the database",
"conn def delete_db(self, timestamp): \"\"\" Mark the DB as deleted",
"Assume that lock_parent_directory has already been called. :param item_list: A",
"prefix = 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count +",
"db as deleted with self.get() as conn: self._delete_db(conn, timestamp) conn.commit()",
"'no such column: metadata' not in str(err): raise conn.execute(\"\"\" ALTER",
"Timeout for trying to connect to a DB BROKER_TIMEOUT =",
"< age_timestamp. Also deletes rows from incoming_sync and outgoing_sync where",
"in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = \"%s-%s\" % (quar_path, uuid4().hex)",
"as deleted :param timestamp: internalized delete timestamp \"\"\" # first,",
":param storage_policy_index: only required for containers \"\"\" if self.db_file ==",
"remote database being rsynced in \"\"\" with self.get() as conn:",
"%s due to %s database') % \\ (self.db_dir, quar_path, exc_hint)",
"with self.get() as conn: row = conn.execute(query).fetchone() return row[0] if",
"given database connection. This function will not call commit on",
"cls is None: cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def",
"FROM %s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete",
"may obtain a copy of the License at # #",
"curs = conn.execute(''' SELECT * FROM %s WHERE ROWID >",
"internalized timestamp of initial PUT request :param storage_policy_index: only required",
"5, 10, 25, 50): yield pm * MB while True:",
"os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info())",
"import errno import six.moves.cPickle as pickle from swift import gettext_",
"return 'DB %s already exists' % self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite",
"for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if",
"a new sqlite3.DatabaseError will be raised indicating the action taken.",
"the stat table. Only modifies status_changed_at if the timestamp is",
"@contextmanager def lock(self): \"\"\"Use with the \"with\" statement; locks a",
"conn: curs = conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory",
"'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as",
"rows from incoming_sync and outgoing_sync where the updated_at timestamp is",
"cls) def commit(self): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class",
"particular broker to be compatible with its :func:`merge_items`. \"\"\" raise",
":raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count",
"> PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp:",
"may not use this file except in compliance with the",
"2: prealloc_size = point break if allocated_size < prealloc_size: with",
"self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata =",
"to that value. \"\"\" metadata = self.get_raw_metadata() if metadata: metadata",
"was set to that value. \"\"\" metadata = self.get_raw_metadata() if",
"a real dict, i.e. when you're going to serialize the",
"self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): \"\"\" Get a",
"this file except in compliance with the License. # You",
"== 0 and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB",
"'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER",
"if self.db_file != ':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file,",
"as err: if err.errno != errno.ENOENT: raise if pending_size >",
"``_initialize`` method. It is ignored by ``AccountBroker``. :param put_timestamp: internalized",
"Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): \"\"\" Get",
"return metadata @staticmethod def validate_metadata(metadata): \"\"\" Validates that metadata_falls within",
"\"racing\" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file,",
"def _commit_puts_load(self, item_list, entry): \"\"\" Unmarshall the :param:entry and append",
"function will not call commit on the conn, but will",
"('incoming' if incoming else 'outgoing'), (id,)).fetchone() if not row: return",
"msg, timeout=0): self.path = path self.timeout = timeout self.msg =",
"pickles. \"\"\" raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): \"\"\" Merge",
"# # Licensed under the Apache License, Version 2.0 (the",
":param timestamp: Empty metadata items last updated before this timestamp",
"':memory:' or not os.path.exists(self.pending_file): return if item_list is None: item_list",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"conn.execute(''' UPDATE %s_stat SET id=? ''' % self.db_type, (str(uuid4()),)) row",
"'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): #",
"conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory = dict_factory return",
"keys from get_info plus max_row and metadata Note:: get_info's <db_contains_type>_count",
"receiving an rsynced db. pass def _is_deleted(self, conn): \"\"\" Check",
"**kwargs) def cursor(self, cls=None): if cls is None: cls =",
"os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path)",
"is None: timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file =",
"not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): try: self.conn",
"#在数据库中添加一条记录 def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return",
"return conn class DatabaseBroker(object): \"\"\"Encapsulates working with a database.\"\"\" def",
"number of pending entries PENDING_CAP = 131072 def utf8encode(*args): return",
"return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if not",
"or is not a database' in str(exc_value): exc_hint = 'corrupted'",
"is XORed by the 128-bit hash on insert or delete.",
"*args, **kwargs)) def dict_factory(crs, row): \"\"\" This should only be",
"result def get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE",
"error messages for DB Errors.\"\"\" def __init__(self, path, msg, timeout=0):",
"with self.get() as conn: for rec in sync_points: try: conn.execute('''",
"DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn =",
"merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\" Used in replication to handle",
"handler that plays well with eventlet.\"\"\" def __init__(self, database, timeout=None,",
"from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import json, Timestamp,",
"plays well with eventlet.\"\"\" def __init__(self, *args, **kwargs): self.timeout =",
"connection')) conn.close() def newid(self, remote_id): \"\"\" Re-id the database. This",
"the database between start and end. :param start: start ROWID",
"cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then mark the db",
"OpenStack Foundation # # Licensed under the Apache License, Version",
"work when receiving an rsynced db. pass def _is_deleted(self, conn):",
"except OSError as err: if err.errno != errno.ENOENT: raise def",
"end. :param start: start ROWID :param count: number to get",
"or logging.getLogger() self.account = account self.container = container self._db_version =",
"In addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp:",
"FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md else",
"chexor(old, name, timestamp): \"\"\" Each entry in the account and",
"MB while True: pm += 50 yield pm * MB",
"DB hash :param name: name of the object or container",
"if the database needs committing. This function was created as",
"self.get() as conn: for rec in sync_points: try: conn.execute(''' INSERT",
"% self.db_type).fetchone()[0] md = json.loads(md) if md else {} utf8encodekeys(md)",
"translated to just \"count\" and metadata is the raw string.",
"SET metadata = ?' % self.db_type, (json.dumps(md),)) return True except",
"timestamp of the new record :returns: a hex representation of",
"conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at",
"self.timeout = timeout self.msg = msg def __str__(self): return 'DB",
"entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError as",
"The idea is to allocate space in front of an",
"import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import",
"points with the incoming sync table. :param sync_points: list of",
"yield True except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level =",
"in (1, 2, 5, 10, 25, 50): yield pm *",
"sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?,",
"if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\"",
"configured SQLite database connection. :param path: path to DB :param",
"as err: # Old dbs didn't have updated_at in the",
"Errors.\"\"\" def __init__(self, path, msg, timeout=0): self.path = path self.timeout",
"return metadata @property def metadata(self): \"\"\" Returns the metadata dict",
"% self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return info",
"quarantined and a new sqlite3.DatabaseError will be raised indicating the",
"self.timeout = timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args,",
"= args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self,",
"and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key in",
"2 #: Max number of pending entries PENDING_CAP = 131072",
"= conn.execute(query).fetchone() return row[0] if row else -1 def get_replication_info(self):",
"import gettext_ as _ from tempfile import mkstemp from eventlet",
"*args, **kwargs) def cursor(self, cls=None): if cls is None: cls",
"items last updated before this timestamp will be removed. :returns:",
"\"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined",
"idea is to allocate space in front of an expanding",
"situation (malformed or corrupted database). If not, the original exception",
"incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at",
"= DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import traceback raise",
"UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s',",
"UPDATE incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID =",
"exc_traceback): \"\"\" Checks the exception info to see if it",
"_commit_puts(self, item_list=None): \"\"\" Scan for .pending files and commit the",
"SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type,",
"AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at =",
"= ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s'",
"sync \"\"\" with self.get() as conn: for rec in sync_points:",
"database. This should be called after an rsync. :param remote_id:",
"stat.st_blocks * 512 for point in prealloc_points(): if file_size <=",
"value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete:",
"for pending updates. \"\"\" if self.db_file == ':memory:' or not",
"if name is None: raise Exception('name is None!') new =",
"the database is considered deleted :param conn: database conn :returns:",
"ROWID = new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON",
"= None orig_isolation_level = conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE')",
"else -1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id)",
"return MB = (1024 * 1024) def prealloc_points(): for pm",
"self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def cursor(self,",
"conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0]",
"for the database. The metadata dict values are tuples of",
"self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn = self.conn",
"TEXT DEFAULT 0 ); CREATE TABLE incoming_sync ( remote_id TEXT",
"conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' '",
"if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn",
"timeout for connection :param okay_to_create: if True, create the DB",
"such column: metadata' not in str(err): raise return False def",
"if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b')",
"last incoming sync, otherwise get the last outgoing sync \"\"\"",
"field in the stat table. Only modifies status_changed_at if the",
"row else -1 def get_replication_info(self): \"\"\" Get information about the",
"self.conn = conn except (Exception, Timeout): logging.exception( _('Broker error trying",
"Connection handler that plays well with eventlet.\"\"\" def __init__(self, database,",
"None: timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file = database",
"def _reclaim(self, conn, timestamp): \"\"\" Removes any empty metadata values",
"account and container databases is XORed by the 128-bit hash",
"table. :param id: remote ID to get the sync_point for",
"delete. This serves as a rolling, order-independent hash of the",
"__init__(self, path, msg, timeout=0): self.path = path self.timeout = timeout",
"s in args] def utf8encodekeys(metadata): uni_keys = [k for k",
"record :returns: a hex representation of the new hash value",
"tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection,",
"= GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout( self.timeout,",
"timestamp is greater than the current status_changed_at timestamp. :param timestamp:",
"DatabaseConnectionError(self.db_file, \"DB doesn't exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size =",
"%d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates the",
"end. with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA",
"meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d'",
"__init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor,",
"*args, **kwargs): if timeout is None: timeout = BROKER_TIMEOUT self.timeout",
"is exceeded \"\"\" meta_count = 0 meta_size = 0 for",
"enumerate(crs.description))) def chexor(old, name, timestamp): \"\"\" Each entry in the",
"delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): \"\"\"",
"if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count =",
"were \"racing\" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn =",
"None orig_isolation_level = conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try:",
"swift.common.swob import HTTPBadRequest #: Whether calls will be made to",
"self.db_contains_type).fetchone() sync_point = row['ROWID'] if row else -1 conn.execute(''' INSERT",
"super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args, **kwargs): return _db_timeout( self.timeout,",
"?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self, conn): #",
"feeding them to merge_items(). Assume that lock_parent_directory has already been",
"\"\"\" with self.get() as conn: row = conn.execute(''' UPDATE %s_stat",
"= ?' ' WHERE put_timestamp < ?' % self.db_type, (timestamp,",
"\"\"\" Catch failures of _commit_puts() if broker is intended for",
"conn): \"\"\" Check if the database is considered deleted :param",
":func:`_reclaim` method. :param age_timestamp: max created_at timestamp of object rows",
":returns: the sync point, or -1 if the id doesn't",
"self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp: # Colons",
"errno.ENOENT: raise def _commit_puts_stale_ok(self): \"\"\" Catch failures of _commit_puts() if",
"def __str__(self): \"\"\" Returns a string identifying the entity under",
"remote_id)) self._newid(conn) conn.commit() def _newid(self, conn): # Override for additional",
"else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET",
"'sync_point'} \"\"\" with self.get() as conn: curs = conn.execute(''' SELECT",
"our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self,",
"list of objects in the database between start and end.",
"DEFAULT 0 ); CREATE TABLE incoming_sync ( remote_id TEXT UNIQUE,",
"= conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as",
"metadata is the raw string. \"\"\" info = self.get_info() info['count']",
"k in uni_keys: sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')] =",
"str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA",
"you're going to serialize the results. \"\"\" return dict( ((col[0],",
"accessing' else: raise exc_type, exc_value, exc_traceback prefix_path = os.path.dirname(self.db_dir) partition_path",
"stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by",
"the db_contains_type table that are marked deleted and whose created_at",
"':memory:' and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file,",
"database connection. :param path: path to DB :param timeout: timeout",
"working with a database.\"\"\" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None,",
"implicitly does a lot of transactions, so we # pick",
"\"SELECT sync_point FROM %s_sync WHERE remote_id=?\" % ('incoming' if incoming",
"if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max",
"= metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file,",
"within acceptable limits. :param metadata: to be validated :raises: HTTPBadRequest",
"conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if",
"DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): \"\"\" Removes",
"int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns a properly",
"LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point = row['ROWID'] if row",
"conn.commit() def reclaim(self, age_timestamp, sync_timestamp): \"\"\" Delete rows from the",
"err: if err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP:",
"so they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64'))",
"connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if",
"the status_changed_at field in the stat table. Only modifies status_changed_at",
"objects in the database between start and end. :param start:",
"as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path",
"if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if item_list",
"metadata = {} return metadata @staticmethod def validate_metadata(metadata): \"\"\" Validates",
"\"count\" and metadata is the raw string. \"\"\" info =",
"os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False)",
"encoding; so they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record),",
"called after an rsync. :param remote_id: the ID of the",
"of a boundary, it allocates to the next boundary. Boundaries",
"def prealloc_points(): for pm in (1, 2, 5, 10, 25,",
"to reclaim metadata within. :param timestamp: Empty metadata items last",
"# of the system were \"racing\" each other. raise DatabaseAlreadyExists(self.db_file)",
"''' % self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM",
"metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md",
"raise sqlite3.DatabaseError(detail) @contextmanager def get(self): \"\"\"Use with the \"with\" statement;",
"lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as if there was",
"in str(exc_value): exc_hint = 'malformed' elif 'file is encrypted or",
"metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout,",
"= None conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file",
"old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def",
"database conn :returns: True if the DB is considered to",
"updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE",
"\"\"\"More friendly error messages for DB Errors.\"\"\" def __init__(self, path):",
"return True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def",
"temp_store = MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor)",
"storage_policy_index: only required for containers \"\"\" if self.db_file == ':memory:':",
"databases is XORed by the 128-bit hash on insert or",
"LIMIT 1 ''' % (self.db_contains_type) with self.get() as conn: row",
"False #: Timeout for trying to connect to a DB",
"the incoming sync table. :param sync_points: list of sync points",
"to ('', timestamp). These empty keys will eventually be removed",
"implemented by a particular broker to be compatible with its",
"the sync table. :param id: remote ID to get the",
"+ len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata",
"is implemented by a particular broker to be compatible with",
"metadata Note:: get_info's <db_contains_type>_count is translated to just \"count\" and",
"disk image is malformed' in str(exc_value): exc_hint = 'malformed' elif",
"AND %s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try:",
"in writing, software # distributed under the License is distributed",
"#: Whether calls will be made to preallocate disk space",
"timestamp): \"\"\" Each entry in the account and container databases",
"here and do a big fsync at the end. with",
"\"\"\" with self.get() as conn: for rec in sync_points: try:",
"MB = (1024 * 1024) def prealloc_points(): for pm in",
"'' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key",
"_('Broker error trying to rollback locked connection')) conn.close() def newid(self,",
"row = conn.execute(''' SELECT ROWID FROM %s ORDER BY ROWID",
"raise def _commit_puts_load(self, item_list, entry): \"\"\" Unmarshall the :param:entry and",
"timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' ' WHERE",
"to that value. Key/values will only be overwritten if the",
"allocated_size = stat.st_blocks * 512 for point in prealloc_points(): if",
"as err: if 'no such column: metadata' not in str(err):",
"get_syncs(self, incoming=True): \"\"\" Get a serialized copy of the sync",
":returns: True if conn.commit() should be called \"\"\" try: md",
"/ 2: prealloc_size = point break if allocated_size < prealloc_size:",
"incoming=True): \"\"\" Gets the most recent sync point for a",
"md.items(): if value == '' and value_timestamp < timestamp: keys_to_delete.append(key)",
"as conn: curs = conn.execute(''' SELECT * FROM %s WHERE",
"raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) if",
"DB Errors.\"\"\" def __init__(self, path): self.path = path def __str__(self):",
"keys will eventually be removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata",
"the put_timestamp. Only modifies it if it is greater than",
"[] self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with",
"self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler that plays",
"super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def cursor(self, cls=None): if cls",
"''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit()",
"metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if",
"limits. :param metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT",
"not row: return -1 return row['sync_point'] def get_syncs(self, incoming=True): \"\"\"",
"cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode",
"a list of sync points with the incoming sync table.",
"updated_at timestamp is < sync_timestamp. In addition, this calls the",
"os.path.exists(self.db_file): # It's as if there was a \"condition\" where",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"OF ANY KIND, either express or # implied. # See",
"?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err:",
"of the object or container being inserted :param timestamp: internalized",
"License, Version 2.0 (the \"License\"); # you may not use",
"2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly error messages for DB",
"os.stat(path) if stat.st_size == 0 and stat.st_ctime >= connect_time: os.unlink(path)",
"timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete timestamp \"\"\"",
"return row['sync_point'] def get_syncs(self, incoming=True): \"\"\" Get a serialized copy",
"if 'no such column: metadata' not in str(err): raise conn.execute(\"\"\"",
"SET put_timestamp = ?' ' WHERE put_timestamp < ?' %",
"metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata",
"-1 def __str__(self): \"\"\" Returns a string identifying the entity",
"return with open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'):",
"serialize the results. \"\"\" return dict( ((col[0], row[idx]) for idx,",
"md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET",
"dict of {'sync_point', 'remote_id'} :param incoming: if True, get the",
"update_status_changed_at(self, timestamp): \"\"\" Update the status_changed_at field in the stat",
"err: # Old dbs didn't have updated_at in the _sync",
"chexor) conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(\"\"\" CREATE TABLE",
"\"\"\" with self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self,",
"+ 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError as",
"reading of stats, and thus does not care for pending",
"Returns a string identifying the entity under broker to a",
"not care for pending updates. \"\"\" if self.db_file == ':memory:'",
"% (self.db_contains_type) with self.get() as conn: row = conn.execute(query).fetchone() return",
"import six.moves.cPickle as pickle from swift import gettext_ as _",
"DB if it doesn't exist :returns: DB connection object \"\"\"",
"BROKER_TIMEOUT = 25 #: Pickle protocol to use PICKLE_PROTOCOL =",
"i.e. when you're going to serialize the results. \"\"\" return",
"None try: yield conn conn.rollback() self.conn = conn except sqlite3.DatabaseError:",
"conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE remote_id=? ''' %",
"error while accessing' else: raise exc_type, exc_value, exc_traceback prefix_path =",
"curs.row_factory = dict_factory return [r for r in curs] def",
"records by feeding them to merge_items(). Assume that lock_parent_directory has",
"if self.db_file != ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok()",
"= OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE')",
"record): if self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file):",
"fp: # Colons aren't used in base64 encoding; so they",
"Exception('name is None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return",
"the License for the specific language governing permissions and #",
"sqlite3.OperationalError as err: # Old dbs didn't have updated_at in",
"where different parts # of the system were \"racing\" each",
"'malformed' elif 'file is encrypted or is not a database'",
"%d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata",
"\"\"\" with self.get() as conn: curs = conn.execute(''' SELECT remote_id,",
"item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for entry",
"is None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file): if",
"*args, **kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args,",
"try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending entry %(file)s:",
"timestamp. :param timestamp: internalized put timestamp \"\"\" with self.get() as",
"closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store =",
"info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self):",
"storage_policy_index is passed through to the subclass's ``_initialize`` method. It",
"logging.exception( _('Broker error trying to rollback locked connection')) conn.close() def",
"md = json.loads(md) keys_to_delete = [] for key, (value, value_timestamp)",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"= orig_isolation_level self.conn = conn except (Exception, Timeout): logging.exception( _('Broker",
"objects between start and end \"\"\" self._commit_puts_stale_ok() with self.get() as",
"\"\"\" Turn this db record dict into the format this",
"License. \"\"\" Database code for Swift \"\"\" from contextlib import",
"%s to %s due to %s database') % \\ (self.db_dir,",
"not in str(err): raise conn.execute(\"\"\" ALTER TABLE %s_stat ADD COLUMN",
"than the timestamp using the given database connection. This function",
"= 'corrupted' elif 'disk I/O error' in str(exc_value): exc_hint =",
"); CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE",
"self.timeout = timeout self.logger = logger or logging.getLogger() self.account =",
"cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row",
"(value, timestamp) in metadata.items(): key = key.lower() if value !=",
"outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at",
"self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file,",
"key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key",
"database is considered deleted :param conn: database conn :returns: True",
"is newer. To delete a key, set its value to",
"functions. :param conn: Database connection to reclaim metadata within. :param",
"raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn",
"None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file): if item_list:",
"_newid(self, conn): # Override for additional work when receiving an",
"'DB %s already exists' % self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB",
"self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET",
"code for Swift \"\"\" from contextlib import contextmanager, closing import",
"%s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute('''",
"if timeout is None: timeout = BROKER_TIMEOUT self.timeout = timeout",
"such column: metadata' not in str(err): raise metadata = ''",
"such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp)",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"value. \"\"\" metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata)",
"in addition to .pending \"\"\" if self.db_file == ':memory:' or",
"% (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE",
"= \"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail =",
"the most recent sync point for a server from the",
"\"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for",
"will instead return True if the database needs committing. This",
"= [] self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return",
"initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create the DB The storage_policy_index is",
"HTTPBadRequest #: Whether calls will be made to preallocate disk",
"unsafe options here and do a big fsync at the",
"not in md or timestamp > md[key][1]: md[key] = value_timestamp",
"the \"with\" statement; returns a database connection.\"\"\" if not self.conn:",
"make_tuple_for_pickle(self, record): \"\"\" Turn this db record dict into the",
"self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager def lock(self): \"\"\"Use",
"create timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete timestamp",
"= {} for k in self.metadata: cleared_meta[k] = ('', timestamp)",
"to DB :param timeout: timeout for connection :param okay_to_create: if",
"quarantine situation (malformed or corrupted database). If not, the original",
"try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close()",
"deleted :param conn: database conn :returns: True if the DB",
"with self.get() as conn: conn.execute(''' DELETE FROM %s WHERE deleted",
"the found records by feeding them to merge_items(). Assume that",
"and end \"\"\" self._commit_puts_stale_ok() with self.get() as conn: curs =",
"= container self._db_version = -1 def __str__(self): \"\"\" Returns a",
"of object rows to delete :param sync_timestamp: max update_at timestamp",
"if self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file): raise",
"# creating dbs implicitly does a lot of transactions, so",
"yield conn conn.rollback() self.conn = conn except sqlite3.DatabaseError: try: conn.close()",
"# distributed under the License is distributed on an \"AS",
"by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with closing(conn.cursor())",
"in str(err): raise conn.execute(\"\"\" ALTER TABLE %s_stat ADD COLUMN metadata",
"# Unless required by applicable law or agreed to in",
"exc_type, exc_value, exc_traceback): \"\"\" Checks the exception info to see",
"put_timestamp: put timestamp :param delete_timestamp: delete timestamp \"\"\" with self.get()",
"used in base64 encoding; so they are our # delimiter",
"self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"already exists' % self.path class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection handler",
"if os.path.exists(self.db_file): # It's as if there was a \"condition\"",
"from the db_contains_type table that are marked deleted and whose",
"max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates",
"SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming' if incoming",
"err: if 'no such column: metadata' not in str(err): raise",
"self.conn = conn except sqlite3.DatabaseError: try: conn.close() except Exception: pass",
"closing import hashlib import logging import os from uuid import",
"conn.executescript(\"\"\" CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER,",
"os.path.getsize(self.pending_file) except OSError as err: if err.errno != errno.ENOENT: raise",
"the database. The metadata dict values are tuples of (value,",
"the current timestamp. :param timestamp: internalized put timestamp \"\"\" with",
"not call commit on the conn, but will instead return",
"to be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is",
"Get a list of objects in the database between start",
"def __str__(self): return 'DB connection error (%s, %s):\\n%s' % (",
"sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TABLE incoming_sync",
"replication. :returns: dict containing keys from get_info plus max_row and",
"(?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self, conn):",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"statement; returns a database connection.\"\"\" if not self.conn: if self.db_file",
"journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row conn.text_factory",
"been called. :param item_list: A list of items to commit",
"if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts()",
"os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp:",
"as conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat' %",
"containers \"\"\" if self.db_file == ':memory:': tmp_db_file = None conn",
"5m, 10m, 25m, 50m, then every 50m after. \"\"\" if",
"if not DB_PREALLOCATION or self.db_file == ':memory:': return MB =",
"!= ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get()",
"if there was a \"condition\" where different parts # of",
"def dict_factory(crs, row): \"\"\" This should only be used when",
"entry in the account and container databases is XORed by",
"('', timestamp). These empty keys will eventually be removed by",
"def utf8encode(*args): return [(s.encode('utf8') if isinstance(s, unicode) else s) for",
"Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the",
"column: metadata' not in str(err): raise metadata = '' return",
"self.conn = None try: yield conn conn.rollback() self.conn = conn",
"validate_metadata(metadata): \"\"\" Validates that metadata_falls within acceptable limits. :param metadata:",
"); CREATE TABLE incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER,",
"container being inserted :param timestamp: internalized timestamp of the new",
"with the \"with\" statement; locks a database.\"\"\" if not self.conn:",
"== ':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout):",
"age_timestamp: max created_at timestamp of object rows to delete :param",
"if incoming else 'outgoing')) result = [] for row in",
"a lot of transactions, so we # pick fast, unsafe",
"or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except",
"0 ); CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN",
"!= ':memory:' and not okay_to_create: # attempt to detect and",
"for replication. :returns: dict containing keys from get_info plus max_row",
"broker to a human. The baseline implementation returns a full",
"database.\"\"\" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file):",
"try: yield True except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level",
"\"\"\") if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index)",
"otherwise get the last outgoing sync :returns: list of {'remote_id',",
"= 0 meta_size = 0 for key, (value, timestamp) in",
"updated_at in the _sync tables. if 'no such column: updated_at'",
"if pending_size > PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b')",
"def utf8encodekeys(metadata): uni_keys = [k for k in metadata if",
"ANY KIND, either express or # implied. # See the",
"pending pickles. \"\"\" raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): \"\"\"",
"def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates the metadata dict for",
"timeout self.msg = msg def __str__(self): return 'DB connection error",
"is vital for useful diagnostics. \"\"\" return self.db_file def initialize(self,",
"exc_hint = 'disk error while accessing' else: raise exc_type, exc_value,",
"stat = os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks *",
"\"\"\" Delete rows from the db_contains_type table that are marked",
"(Exception, Timeout): logging.exception( _('Broker error trying to rollback locked connection'))",
":param delete_timestamp: delete timestamp \"\"\" with self.get() as conn: old_status",
"DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at timestamp of object",
"put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as",
"device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined', self.db_type + 's',",
"as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn:",
"raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): \"\"\" Merge a list",
"os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs",
"# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under",
"of the remote database being rsynced in \"\"\" with self.get()",
"if err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record])",
"path): self.path = path def __str__(self): return 'DB %s already",
"not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout:",
"yield pm * MB while True: pm += 50 yield",
"% self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in",
"in metadata if isinstance(k, unicode)] for k in uni_keys: sv",
"!= errno.ENOENT: raise def _commit_puts_stale_ok(self): \"\"\" Catch failures of _commit_puts()",
"dbs_path = os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined',",
"for r in curs] def get_sync(self, id, incoming=True): \"\"\" Gets",
"= stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger =",
"self.db_file != ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with",
"an rsynced db. pass def _is_deleted(self, conn): \"\"\" Check if",
"= self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp),",
"(sync_point, remote_id) VALUES (?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit()",
"self.path = path self.timeout = timeout self.msg = msg def",
"ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW')",
"\"\"\" with self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE",
"under the License is distributed on an \"AS IS\" BASIS,",
"return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\" Used in",
"value, timestamp = value_timestamp if key not in md or",
"useful diagnostics. \"\"\" return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\"",
"the system were \"racing\" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file)",
"_reclaim(self, conn, timestamp): \"\"\" Removes any empty metadata values older",
"ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON",
"import os from uuid import uuid4 import sys import time",
"a DB BROKER_TIMEOUT = 25 #: Pickle protocol to use",
"%s_sync WHERE remote_id=?\" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone()",
"Re-id the database. This should be called after an rsync.",
"INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?, ?)",
"as conn: for rec in sync_points: try: conn.execute(''' INSERT INTO",
"row['ROWID'] if row else -1 conn.execute(''' INSERT OR REPLACE INTO",
"WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ?",
"timestamp): \"\"\" Removes any empty metadata values older than the",
"contextmanager, closing import hashlib import logging import os from uuid",
"if 'locked' not in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait",
"UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID =",
"result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def get_max_row(self): query =",
"2, 5, 10, 25, 50): yield pm * MB while",
"UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TRIGGER",
"from %s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone() #在数据库中添加一条记录",
"%s_sync ''' % ('incoming' if incoming else 'outgoing')) result =",
"new sqlite3.DatabaseError will be raised indicating the action taken. \"\"\"",
"512k of a boundary, it allocates to the next boundary.",
"and whose created_at timestamp is < age_timestamp. Also deletes rows",
"value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if key not",
"pm += 50 yield pm * MB stat = os.stat(self.db_file)",
":returns: DB connection object \"\"\" try: connect_time = time.time() conn",
"os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by connect?') conn.row_factory =",
"keys_to_delete: for key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET",
"self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b')",
"@staticmethod def validate_metadata(metadata): \"\"\" Validates that metadata_falls within acceptable limits.",
"value to ('', timestamp). These empty keys will eventually be",
"sqlite3.Row conn.text_factory = str conn.executescript(\"\"\" CREATE TABLE outgoing_sync ( remote_id",
"handle updating timestamps. :param created_at: create timestamp :param put_timestamp: put",
"put_timestamp. Only modifies it if it is greater than the",
"DB Errors.\"\"\" def __init__(self, path, msg, timeout=0): self.path = path",
"_('Quarantined %s to %s due to %s database') % \\",
"point, or -1 if the id doesn't exist. \"\"\" with",
"status_changed_at if the timestamp is greater than the current status_changed_at",
"sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout,",
"else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such",
"= stat.st_blocks * 512 for point in prealloc_points(): if file_size",
"connection. This function will not call commit on the conn,",
"if entry: try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending",
"self._db_version = -1 def __str__(self): \"\"\" Returns a string identifying",
"doesn't exist :returns: DB connection object \"\"\" try: connect_time =",
"remote_id: the ID of the remote database being rsynced in",
"\"\"\" raise NotImplementedError() def is_deleted(self): \"\"\" Check if the DB",
"protocol to use PICKLE_PROTOCOL = 2 #: Max number of",
"Returns a properly configured SQLite database connection. :param path: path",
"row in curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def",
"curs] def get_sync(self, id, incoming=True): \"\"\" Gets the most recent",
"= Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with",
"big fsync at the end. with closing(conn.cursor()) as cur: cur.execute('PRAGMA",
"in prealloc_points(): if file_size <= point - MB / 2:",
"and do a big fsync at the end. with closing(conn.cursor())",
"'no such column: metadata' not in str(err): raise return False",
"stat.st_size == 0 and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path,",
"get the sync_point for :param incoming: if True, get the",
"def metadata(self): \"\"\" Returns the metadata dict for the database.",
"def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create the DB The storage_policy_index",
"should be called \"\"\" try: md = conn.execute('SELECT metadata FROM",
"rsync. :param remote_id: the ID of the remote database being",
"FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE",
"timeout is None: timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file",
"before this timestamp will be removed. :returns: True if conn.commit()",
"return 'DB connection error (%s, %s):\\n%s' % ( self.path, self.timeout,",
"conn.rollback() self.conn = conn except sqlite3.DatabaseError: try: conn.close() except Exception:",
"stats, and thus does not care for pending updates. \"\"\"",
"with open(self.pending_file, 'a+b') as fp: # Colons aren't used in",
"to the next boundary. Boundaries are 2m, 5m, 10m, 25m,",
"!= ':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except",
"%s_sync SET sync_point=max(?, sync_point) WHERE remote_id=? ''' % ('incoming' if",
"value \"\"\" if name is None: raise Exception('name is None!')",
"= STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER",
"timestamps. :param created_at: create timestamp :param put_timestamp: put timestamp :param",
"if conn.commit() should be called \"\"\" try: md = conn.execute('SELECT",
"the License. # You may obtain a copy of the",
"and outgoing_sync where the updated_at timestamp is < sync_timestamp. In",
"16)) def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns a properly configured",
"def cursor(self, cls=None): if cls is None: cls = GreenDBCursor",
"disk space for database files. DB_PREALLOCATION = False #: Timeout",
"# See the License for the specific language governing permissions",
"pending_size = os.path.getsize(self.pending_file) except OSError as err: if err.errno !=",
"entity under broker to a human. The baseline implementation returns",
"rsynced db. pass def _is_deleted(self, conn): \"\"\" Check if the",
"Mark the DB as deleted :param timestamp: internalized delete timestamp",
"Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file,",
"os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError as e: if",
"database between start and end. :param start: start ROWID :param",
"if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {}",
"\"\"\" self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute(''' SELECT",
"% self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): \"\"\" Delete",
"in args] def utf8encodekeys(metadata): uni_keys = [k for k in",
"get the last incoming sync, otherwise get the last outgoing",
"raise return False def update_put_timestamp(self, timestamp): \"\"\" Update the put_timestamp.",
"self._commit_puts() except LockTimeout: if not self.stale_reads_ok: raise def _commit_puts_load(self, item_list,",
"metadata TEXT DEFAULT '' \"\"\" % self.db_type) md = {}",
"if True, create the DB if it doesn't exist :returns:",
"\"\"\" Returns the metadata dict for the database. The metadata",
"self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM %s WHERE",
"= os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks * 512",
"get :returns: list of objects between start and end \"\"\"",
"class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly error messages for DB Errors.\"\"\" def",
"number to get :returns: list of objects between start and",
"self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create the DB The",
"retry_wait = 0.001 while True: try: return call() except sqlite3.OperationalError",
"def lock(self): \"\"\"Use with the \"with\" statement; locks a database.\"\"\"",
"as if there was a \"condition\" where different parts #",
"elif 'file is encrypted or is not a database' in",
"\"\"\" Gets the most recent sync point for a server",
"the end. with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF')",
"except (Exception, Timeout): logging.exception( _('Broker error trying to rollback locked",
"file stat = os.stat(path) if stat.st_size == 0 and stat.st_ctime",
"database connection.\"\"\" if not self.conn: if self.db_file != ':memory:' and",
"if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in metadata_updates.items(): if",
"LockTimeout from swift.common.swob import HTTPBadRequest #: Whether calls will be",
"meta_size = 0 for key, (value, timestamp) in metadata.items(): key",
"DB :param timeout: timeout for connection :param okay_to_create: if True,",
"going to serialize the results. \"\"\" return dict( ((col[0], row[idx])",
"is passed through to the subclass's ``_initialize`` method. It is",
"not a database' in str(exc_value): exc_hint = 'corrupted' elif 'disk",
"conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming' if",
"a worker to limit transactions and commits from other related",
"or timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md)",
"get_replication_info(self): \"\"\" Get information about the DB required for replication.",
"synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store =",
"def get_replication_info(self): \"\"\" Get information about the DB required for",
"WHERE put_timestamp < ?' % self.db_type, (timestamp, timestamp)) conn.commit() def",
"as _ from tempfile import mkstemp from eventlet import sleep,",
"key = key.lower() if value != '' and (key.startswith('x-account-meta') or",
"except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file,",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not okay_to_create: #",
"for useful diagnostics. \"\"\" return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None):",
"fp.flush() def _commit_puts(self, item_list=None): \"\"\" Scan for .pending files and",
"< ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE",
"return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs)) def",
"conn.text_factory = str conn.executescript(\"\"\" CREATE TABLE outgoing_sync ( remote_id TEXT",
"writing, software # distributed under the License is distributed on",
"'.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok = stale_reads_ok self.db_dir",
"not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn =",
"count): \"\"\" Get a list of objects in the database",
"logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with a database.\"\"\"",
"and commit the found records by feeding them to merge_items().",
"get the last outgoing sync \"\"\" with self.get() as conn:",
"for s in args] def utf8encodekeys(metadata): uni_keys = [k for",
":param count: number to get :returns: list of objects between",
"# limitations under the License. \"\"\" Database code for Swift",
"timestamp of sync rows to delete \"\"\" if self.db_file !=",
"it indicates a quarantine situation (malformed or corrupted database). If",
"broker to be compatible with its :func:`merge_items`. \"\"\" raise NotImplementedError",
"= conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory = dict_factory",
"WHERE remote_id=? ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'],",
"ROWID > ? ORDER BY ROWID ASC LIMIT ? '''",
"Old dbs didn't have updated_at in the _sync tables. if",
"string. \"\"\" info = self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type)",
"by ``AccountBroker``. :param put_timestamp: internalized timestamp of initial PUT request",
":param item_list: A list of items to commit in addition",
"self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): \"\"\" Scan for .pending",
"so we # pick fast, unsafe options here and do",
"incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync",
"row = conn.execute(''' UPDATE %s_stat SET id=? ''' % self.db_type,",
"Cursor handler that plays well with eventlet.\"\"\" def __init__(self, *args,",
"**kwargs)) def dict_factory(crs, row): \"\"\" This should only be used",
"= {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items(): value, timestamp",
"\"\"\" Each entry in the account and container databases is",
"string identifying the entity under broker to a human. The",
"conn: for rec in sync_points: try: conn.execute(''' INSERT INTO %s_sync",
"None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' %",
"timestamp indicates when that key was set to that value.",
"def get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE",
"to delete :param sync_timestamp: max update_at timestamp of sync rows",
"md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items(): value,",
"e: if 'locked' not in str(e): raise sleep(retry_wait) retry_wait =",
"item_list is None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file):",
"not in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait * 2,",
"It's as if there was a \"condition\" where different parts",
"too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False):",
"the updated_at timestamp is < sync_timestamp. In addition, this calls",
"not in str(err): raise return False def update_put_timestamp(self, timestamp): \"\"\"",
"\\ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from swift.common.swob",
"handler that plays well with eventlet.\"\"\" def __init__(self, *args, **kwargs):",
"the DB required for replication. :returns: dict containing keys from",
"exist. \"\"\" with self.get() as conn: row = conn.execute( \"SELECT",
"of _commit_puts() if broker is intended for reading of stats,",
"set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in metadata_updates.items(): if timestamp",
"image is malformed' in str(exc_value): exc_hint = 'malformed' elif 'file",
"= self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok()",
"PICKLE_PROTOCOL = 2 #: Max number of pending entries PENDING_CAP",
"are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def",
"exc_value, exc_traceback prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path =",
"conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise",
"-1 if the id doesn't exist. \"\"\" with self.get() as",
"try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ? ''',",
"lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler that plays well",
"> md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat",
"timestamp): \"\"\" Update the put_timestamp. Only modifies it if it",
"%(entry)s'), {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(),",
"fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as",
"'DB file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory =",
"= self.conn self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level =",
"+ len(key) + len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too",
"(int(old, 16) ^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\"",
"connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with closing(conn.cursor()) as",
"this db record dict into the format this service uses",
"that are marked deleted and whose created_at timestamp is <",
"# pick fast, unsafe options here and do a big",
"= get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file,",
"of (value, timestamp) where the timestamp indicates when that key",
"properly configured SQLite database connection. :param path: path to DB",
"? ''' % self.db_contains_type, (start, count)) curs.row_factory = dict_factory return",
"the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at timestamp of",
"'disk I/O error' in str(exc_value): exc_hint = 'disk error while",
"start and end \"\"\" self._commit_puts_stale_ok() with self.get() as conn: curs",
"SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type) with self.get()",
"conn except (Exception, Timeout): logging.exception( _('Broker error trying to rollback",
"original exception will be reraised. If so, the database will",
"incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\" The",
"tuples of (value, timestamp) where the timestamp indicates when that",
"conn.commit() def _reclaim(self, conn, timestamp): \"\"\" Removes any empty metadata",
"timestamp of object rows to delete :param sync_timestamp: max update_at",
"(check + XOR) :param old: hex representation of the current",
"self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level = None conn.execute('BEGIN",
"remote_id) VALUES (?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def",
"language governing permissions and # limitations under the License. \"\"\"",
"hash value \"\"\" if name is None: raise Exception('name is",
"= ?' % self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp):",
"= value_timestamp if key not in md or timestamp >",
"with self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn,",
"INSERT ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s',",
"okay_to_create: if True, create the DB if it doesn't exist",
"container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with a database.\"\"\" self.conn =",
"deleted and whose created_at timestamp is < age_timestamp. Also deletes",
"conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp)",
"incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT",
"raise @contextmanager def lock(self): \"\"\"Use with the \"with\" statement; locks",
"\"\"\" # first, clear the metadata cleared_meta = {} for",
"if the timestamp is newer. To delete a key, set",
"sqlite3.OperationalError as err: if 'no such column: metadata' not in",
"a hex representation of the new hash value \"\"\" if",
"is considered to be deleted, False otherwise \"\"\" if self.db_file",
"END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN UPDATE",
"and end. :param start: start ROWID :param count: number to",
"working with a database.\"\"\" self.conn = None self.db_file = db_file",
"the timestamp indicates when that key was set to that",
"timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): \"\"\" Checks the",
"self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?,",
"= BROKER_TIMEOUT self.timeout = timeout self.db_file = database super(GreenDBConnection, self).__init__(database,",
"= _('Quarantined %s to %s due to %s database') %",
"Timeout): conn.close() raise @contextmanager def lock(self): \"\"\"Use with the \"with\"",
"get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name",
"idx, col in enumerate(crs.description))) def chexor(old, name, timestamp): \"\"\" Each",
"NotImplementedError() def is_deleted(self): \"\"\" Check if the DB is considered",
"with self.get() as conn: curs = conn.execute('SELECT * from %s_stat'",
"if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try: with",
"raise if pending_size > PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file,",
"os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks * 512 for",
"CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at",
"(malformed or corrupted database). If not, the original exception will",
"calls will be made to preallocate disk space for database",
"str(exc_value): exc_hint = 'disk error while accessing' else: raise exc_type,",
"= 'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):]",
"lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file) except",
"internalized put timestamp \"\"\" with self.get() as conn: conn.execute( 'UPDATE",
"allocated_size < prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size))",
"DELETE FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,)) except",
"on the conn, but will instead return True if the",
"timestamp): \"\"\" Update the status_changed_at field in the stat table.",
"conn: row = conn.execute( \"SELECT sync_point FROM %s_sync WHERE remote_id=?\"",
"OSError as err: if err.errno != errno.ENOENT: raise if pending_size",
"'' \"\"\" % self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key,",
"UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) '''",
"col in enumerate(crs.description))) def chexor(old, name, timestamp): \"\"\" Each entry",
"of stats, and thus does not care for pending updates.",
"None conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file =",
":func:`merge_items`. \"\"\" raise NotImplementedError def make_tuple_for_pickle(self, record): \"\"\" Turn this",
"committing. This function was created as a worker to limit",
"about the DB required for replication. :returns: dict containing keys",
"such column: metadata' not in str(err): raise conn.execute(\"\"\" ALTER TABLE",
"items to commit in addition to .pending \"\"\" if self.db_file",
"str(err): raise return False def update_put_timestamp(self, timestamp): \"\"\" Update the",
"raise NotImplementedError() def is_deleted(self): \"\"\" Check if the DB is",
"meta_count = 0 meta_size = 0 for key, (value, timestamp)",
":param age_timestamp: max created_at timestamp of object rows to delete",
"of the new record :returns: a hex representation of the",
"of objects between start and end \"\"\" self._commit_puts_stale_ok() with self.get()",
"0 for key, (value, timestamp) in metadata.items(): key = key.lower()",
"MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT)",
"self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): \"\"\"Use with the \"with\"",
"(json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): \"\"\" Delete rows from",
"{} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such column:",
"where the updated_at timestamp is < sync_timestamp. In addition, this",
"key, (value, value_timestamp) in md.items(): if value == '' and",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"of the system were \"racing\" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file,",
"last incoming sync, otherwise get the last outgoing sync :returns:",
"database). If not, the original exception will be reraised. If",
"to just \"count\" and metadata is the raw string. \"\"\"",
"sync_points, incoming=True): \"\"\" Merge a list of sync points with",
"pm * MB while True: pm += 50 yield pm",
"be removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集",
"age_timestamp, sync_timestamp): \"\"\" Delete rows from the db_contains_type table that",
"max created_at timestamp of object rows to delete :param sync_timestamp:",
"DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point = row['ROWID'] if",
"self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): \"\"\" Update the",
"database will be quarantined and a new sqlite3.DatabaseError will be",
"the database will be quarantined and a new sqlite3.DatabaseError will",
"!= self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self,",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"'no such column: metadata' not in str(err): raise metadata =",
"logger or logging.getLogger() self.account = account self.container = container self._db_version",
"'' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'):",
"broker is intended for reading of stats, and thus does",
"use PICKLE_PROTOCOL = 2 #: Max number of pending entries",
"import hashlib import logging import os from uuid import uuid4",
"%s_sync (sync_point, remote_id) VALUES (?, ?) ''' % ('incoming' if",
"exc_hint = 'corrupted' elif 'disk I/O error' in str(exc_value): exc_hint",
"OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''',",
"def get_items_since(self, start, count): \"\"\" Get a list of objects",
"= (1024 * 1024) def prealloc_points(): for pm in (1,",
"current status_changed_at timestamp. :param timestamp: internalized timestamp \"\"\" with self.get()",
"through to the subclass's ``_initialize`` method. It is ignored by",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"= min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly error",
"I/O error' in str(exc_value): exc_hint = 'disk error while accessing'",
"[(s.encode('utf8') if isinstance(s, unicode) else s) for s in args]",
"to merge_items(). Assume that lock_parent_directory has already been called. :param",
"conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(),",
"% (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old, 16) ^ int(new,",
"metadata too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates,",
"instead return True if the database needs committing. This function",
"conn.commit() def update_status_changed_at(self, timestamp): \"\"\" Update the status_changed_at field in",
"self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat",
"column: metadata' not in str(err): raise return False def update_put_timestamp(self,",
"files. DB_PREALLOCATION = False #: Timeout for trying to connect",
"True: try: return call() except sqlite3.OperationalError as e: if 'locked'",
"start: start ROWID :param count: number to get :returns: list",
"see if it indicates a quarantine situation (malformed or corrupted",
"cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA",
"incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at",
"doesn't exist\") conn = self.conn self.conn = None try: yield",
"= Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): \"\"\"",
"json, Timestamp, renamer, \\ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import",
"\"\"\" Merge a list of sync points with the incoming",
"key not in md or timestamp > md[key][1]: md[key] =",
"lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if not self.stale_reads_ok: raise def",
"care for pending updates. \"\"\" if self.db_file == ':memory:' or",
"\"\"\"SQLite Cursor handler that plays well with eventlet.\"\"\" def __init__(self,",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"conn.commit() should be called \"\"\" try: md = conn.execute('SELECT metadata",
"being rsynced in \"\"\" with self.get() as conn: row =",
"return info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs",
"_db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait = 0.001 while",
"dbs didn't have updated_at in the _sync tables. if 'no",
"#对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size =",
"tmp_db_file = None conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd,",
"current timestamp. :param timestamp: internalized put timestamp \"\"\" with self.get()",
"MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row conn.text_factory = str",
"of pending entries PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8')",
"= OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = MEMORY')",
"specific language governing permissions and # limitations under the License.",
"\"\"\" with self.get() as conn: row = conn.execute( \"SELECT sync_point",
"conn: try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0]",
"hex representation of the current DB hash :param name: name",
"timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get()",
"timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file = database super(GreenDBConnection,",
"with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with",
"created_at, put_timestamp, delete_timestamp): \"\"\" Used in replication to handle updating",
"table. :param incoming: if True, get the last incoming sync,",
"the last incoming sync, otherwise get the last outgoing sync",
"get(self): \"\"\"Use with the \"with\" statement; returns a database connection.\"\"\"",
"= [k for k in metadata if isinstance(k, unicode)] for",
"import logging import os from uuid import uuid4 import sys",
"open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file):",
"entry): \"\"\" Unmarshall the :param:entry and append it to :param:item_list.",
"if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit()",
"outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT",
"self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with",
"list of sync points with the incoming sync table. :param",
"stat = os.stat(path) if stat.st_size == 0 and stat.st_ctime >=",
"FROM %s_sync WHERE remote_id=?\" % ('incoming' if incoming else 'outgoing'),",
"curs.row_factory = dict_factory return curs.fetchone() #在数据库中添加一条记录 def put_record(self, record): if",
"as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\"",
"# you may not use this file except in compliance",
"being inserted :param timestamp: internalized timestamp of the new record",
"sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More",
"md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata",
"MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count = 0 meta_size",
"be used when you need a real dict, i.e. when",
"deleted with self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self,",
"with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if not self.stale_reads_ok: raise",
"sync points with the incoming sync table. :param sync_points: list",
"**kwargs): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs))",
"copy of the sync table. :param incoming: if True, get",
"str(exc_value): exc_hint = 'corrupted' elif 'disk I/O error' in str(exc_value):",
"is considered to be deleted, False otherwise \"\"\" raise NotImplementedError()",
"if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max",
"2010-2012 OpenStack Foundation # # Licensed under the Apache License,",
"space for database files. DB_PREALLOCATION = False #: Timeout for",
"\"\"\" from contextlib import contextmanager, closing import hashlib import logging",
"a \"condition\" where different parts # of the system were",
"just \"count\" and metadata is the raw string. \"\"\" info",
"the _sync tables. if 'no such column: updated_at' not in",
"if file_size <= point - MB / 2: prealloc_size =",
"self.get() as conn: curs = conn.execute(''' SELECT * FROM %s",
"creates the db file stat = os.stat(path) if stat.st_size ==",
"# implied. # See the License for the specific language",
"a database connection.\"\"\" if not self.conn: if self.db_file != ':memory:'",
"* 512 for point in prealloc_points(): if file_size <= point",
"outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if",
"self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then mark the",
"of initial PUT request :param storage_policy_index: only required for containers",
"ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point = row['ROWID']",
"% self.db_contains_type).fetchone() sync_point = row['ROWID'] if row else -1 conn.execute('''",
"metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据",
"self).__init__(database, 0, *args, **kwargs) def cursor(self, cls=None): if cls is",
"self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn = self.conn",
"XORed by the 128-bit hash on insert or delete. This",
"other related functions. :param conn: Database connection to reclaim metadata",
"object or container being inserted :param timestamp: internalized timestamp of",
"every 50m after. \"\"\" if not DB_PREALLOCATION or self.db_file ==",
"timestamp. :param timestamp: internalized timestamp \"\"\" with self.get() as conn:",
"metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md)",
":param okay_to_create: if True, create the DB if it doesn't",
"thus does not care for pending updates. \"\"\" if self.db_file",
"try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md",
"'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\" The idea is",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"#将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp: # Colons aren't used",
"% self.db_type) curs.row_factory = dict_factory return curs.fetchone() #在数据库中添加一条记录 def put_record(self,",
"prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path",
"= pending_timeout or 10 self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file)",
"self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM %s ORDER",
"if 'no such column: metadata' not in str(err): raise metadata",
"= NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY')",
"with self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type,",
"account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with a database.\"\"\" self.conn",
"'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER",
"was set to that value. Key/values will only be overwritten",
"TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT",
"self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT * from",
"to be compatible with its :func:`merge_items`. \"\"\" raise NotImplementedError def",
"start and end. :param start: start ROWID :param count: number",
":param timestamp: internalized timestamp \"\"\" with self.get() as conn: self._update_status_changed_at(conn,",
"is < age_timestamp. Also deletes rows from incoming_sync and outgoing_sync",
"(value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else:",
"with eventlet.\"\"\" def __init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file",
"\"\"\" return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): \"\"\" Create the",
"OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor',",
"def newid(self, remote_id): \"\"\" Re-id the database. This should be",
"Removes any empty metadata values older than the timestamp using",
"connection :param okay_to_create: if True, create the DB if it",
"_('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) if",
"lot of transactions, so we # pick fast, unsafe options",
"retry_wait = min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly",
"= os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path =",
"Timestamp, renamer, \\ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout",
"dict values are tuples of (value, timestamp) where the timestamp",
"TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON",
"conn: Database connection to reclaim metadata within. :param timestamp: Empty",
"'%032x' % (int(old, 16) ^ int(new, 16)) def get_db_connection(path, timeout=30,",
"^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns a",
"[r for r in curs] def get_sync(self, id, incoming=True): \"\"\"",
"10m, 25m, 50m, then every 50m after. \"\"\" if not",
"len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items;",
"e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = \"%s-%s\" %",
"value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete:",
"many metadata items; max %d' % MAX_META_COUNT) if meta_size >",
"if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?' %",
"= MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except",
"''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ?",
"True: pm += 50 yield pm * MB stat =",
"items; max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise",
".pending files and commit the found records by feeding them",
"in the database between start and end. :param start: start",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"+ '.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok = stale_reads_ok",
"WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type) with",
"representation of the current DB hash :param name: name of",
"True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self,",
"Also deletes rows from incoming_sync and outgoing_sync where the updated_at",
"pickle from swift import gettext_ as _ from tempfile import",
"os from uuid import uuid4 import sys import time import",
"passed through to the subclass's ``_initialize`` method. It is ignored",
"= json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod",
"sync_timestamp: max update_at timestamp of sync rows to delete \"\"\"",
"'r+b') as fp: for entry in fp.read().split(':'): if entry: try:",
"INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?) ''' %",
"or -1 if the id doesn't exist. \"\"\" with self.get()",
"in base64 encoding; so they are our # delimiter fp.write(':')",
"for a server from the sync table. :param id: remote",
"failures of _commit_puts() if broker is intended for reading of",
"error trying to rollback locked connection')) conn.close() def newid(self, remote_id):",
"self, *args, **kwargs)) def dict_factory(crs, row): \"\"\" This should only",
"action taken. \"\"\" if 'database disk image is malformed' in",
"= get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn",
"rows to delete \"\"\" if self.db_file != ':memory:' and os.path.exists(self.pending_file):",
"MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory =",
"return [(s.encode('utf8') if isinstance(s, unicode) else s) for s in",
"sys import time import errno import six.moves.cPickle as pickle from",
"= row['ROWID'] if row else -1 conn.execute(''' INSERT OR REPLACE",
"within. :param timestamp: Empty metadata items last updated before this",
"and a new sqlite3.DatabaseError will be raised indicating the action",
"traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object): \"\"\"Encapsulates",
"(quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined %s to",
"Check if the database is considered deleted :param conn: database",
"where the timestamp indicates when that key was set to",
"# first, clear the metadata cleared_meta = {} for k",
"INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s',",
"= new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync",
"create the DB if it doesn't exist :returns: DB connection",
"os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't",
"and append it to :param:item_list. This is implemented by a",
"key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count",
"_commit_puts() if broker is intended for reading of stats, and",
"% MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates the metadata",
"= info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row()",
"metadata' not in str(err): raise conn.execute(\"\"\" ALTER TABLE %s_stat ADD",
"and metadata Note:: get_info's <db_contains_type>_count is translated to just \"count\"",
"= conn except (Exception, Timeout): logging.exception( _('Broker error trying to",
"id=? ''' % self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID",
"== ':memory:' or not os.path.exists(self.pending_file): return if item_list is None:",
"str(err): raise metadata = '' return metadata @property def metadata(self):",
"with eventlet.\"\"\" def __init__(self, database, timeout=None, *args, **kwargs): if timeout",
"sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly does a",
"class GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection handler that plays well with",
"cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store",
"import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import json, Timestamp, renamer, \\",
"= account self.container = container self._db_version = -1 def __str__(self):",
"This serves as a rolling, order-independent hash of the contents.",
"be raised indicating the action taken. \"\"\" if 'database disk",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute(",
"@contextmanager def get(self): \"\"\"Use with the \"with\" statement; returns a",
"is translated to just \"count\" and metadata is the raw",
"utf8encodekeys(metadata): uni_keys = [k for k in metadata if isinstance(k,",
"next boundary. Boundaries are 2m, 5m, 10m, 25m, 50m, then",
"will eventually be removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata =",
"either express or # implied. # See the License for",
"_commit_puts_stale_ok(self): \"\"\" Catch failures of _commit_puts() if broker is intended",
"self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp):",
"exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size",
"conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?) '''",
"it doesn't exist :returns: DB connection object \"\"\" try: connect_time",
"-1 def get_replication_info(self): \"\"\" Get information about the DB required",
"this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at",
"file_size = stat.st_size allocated_size = stat.st_blocks * 512 for point",
"this service uses for pending pickles. \"\"\" raise NotImplementedError def",
"curs.fetchone() #在数据库中添加一条记录 def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record])",
"lock_parent_directory has already been called. :param item_list: A list of",
"def is_deleted(self): \"\"\" Check if the DB is considered to",
"cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import",
"(Exception, Timeout): conn.close() raise @contextmanager def lock(self): \"\"\"Use with the",
"deleted, False otherwise \"\"\" if self.db_file != ':memory:' and not",
"+= 50 yield pm * MB stat = os.stat(self.db_file) file_size",
"metadata_updates.items(): value, timestamp = value_timestamp if key not in md",
"after. \"\"\" if not DB_PREALLOCATION or self.db_file == ':memory:': return",
"DEFAULT '' \"\"\" % self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for",
"? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at <",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 );",
"to get the sync_point for :param incoming: if True, get",
"#: Pickle protocol to use PICKLE_PROTOCOL = 2 #: Max",
"key = key[len(prefix):] meta_count = meta_count + 1 meta_size =",
"dict, i.e. when you're going to serialize the results. \"\"\"",
"self.get() as conn: try: md = conn.execute('SELECT metadata FROM %s_stat'",
"(self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at",
"if path != ':memory:' and not okay_to_create: # attempt to",
":param incoming: if True, get the last incoming sync, otherwise",
"Create the DB The storage_policy_index is passed through to the",
"front of an expanding db. If it gets within 512k",
"start ROWID :param count: number to get :returns: list of",
"-1 return row['sync_point'] def get_syncs(self, incoming=True): \"\"\" Get a serialized",
"self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file,",
"UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TABLE",
"def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT",
"DB_PREALLOCATION or self.db_file == ':memory:': return MB = (1024 *",
"\"\"\" Update the put_timestamp. Only modifies it if it is",
"= timeout self.logger = logger or logging.getLogger() self.account = account",
"Note:: get_info's <db_contains_type>_count is translated to just \"count\" and metadata",
"\"\"\" Updates the metadata dict for the database. The metadata",
"status_changed_at timestamp. :param timestamp: internalized timestamp \"\"\" with self.get() as",
"the account and container databases is XORed by the 128-bit",
"SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1",
"if md: md = json.loads(md) keys_to_delete = [] for key,",
"sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert",
"in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata = ?'",
"a quarantine situation (malformed or corrupted database). If not, the",
"self.get() as conn: row = conn.execute(query).fetchone() return row[0] if row",
"the timestamp is greater than the current status_changed_at timestamp. :param",
"remote_id): \"\"\" Re-id the database. This should be called after",
"called \"\"\" try: md = conn.execute('SELECT metadata FROM %s_stat' %",
"put_timestamp: internalized timestamp of initial PUT request :param storage_policy_index: only",
"\"with\" statement; returns a database connection.\"\"\" if not self.conn: if",
"self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp):",
"conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,))",
"value_timestamp if key not in md or timestamp > md[key][1]:",
"def get_sync(self, id, incoming=True): \"\"\" Gets the most recent sync",
"ID to get the sync_point for :param incoming: if True,",
"> MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d' %",
"be deleted. :returns: True if the DB is considered to",
"msg def __str__(self): return 'DB connection error (%s, %s):\\n%s' %",
"that plays well with eventlet.\"\"\" def __init__(self, *args, **kwargs): self.timeout",
"for :param incoming: if True, get the last incoming sync,",
"timestamp: internalized timestamp \"\"\" with self.get() as conn: self._update_status_changed_at(conn, timestamp)",
"dict_factory(crs, row): \"\"\" This should only be used when you",
"cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA",
"elif 'disk I/O error' in str(exc_value): exc_hint = 'disk error",
"in front of an expanding db. If it gets within",
"for reading of stats, and thus does not care for",
"row = conn.execute( \"SELECT sync_point FROM %s_sync WHERE remote_id=?\" %",
"DatabaseBroker(object): \"\"\"Encapsulates working with a database.\"\"\" def __init__(self, db_file, timeout=BROKER_TIMEOUT,",
"TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET",
"system were \"racing\" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn",
"KIND, either express or # implied. # See the License",
"if it indicates a quarantine situation (malformed or corrupted database).",
"greater than the current timestamp. :param timestamp: internalized put timestamp",
"#所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as conn: try: md =",
"\"\"\" Removes any empty metadata values older than the timestamp",
"otherwise \"\"\" raise NotImplementedError() def is_deleted(self): \"\"\" Check if the",
"of the current DB hash :param name: name of the",
"a full pathname to a database. This is vital for",
"method. It is ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp",
"md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md =",
"- MB / 2: prealloc_size = point break if allocated_size",
"This should only be used when you need a real",
"{} for k in self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta)",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"database being rsynced in \"\"\" with self.get() as conn: row",
"open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get()",
"1 ''' % (self.db_contains_type) with self.get() as conn: row =",
"update_at timestamp of sync rows to delete \"\"\" if self.db_file",
"that key was set to that value. Key/values will only",
"path self.timeout = timeout self.msg = msg def __str__(self): return",
"conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly",
"self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except",
"timestamp: internalized delete timestamp \"\"\" # first, clear the metadata",
"1024) def prealloc_points(): for pm in (1, 2, 5, 10,",
"DB is considered to be deleted, False otherwise \"\"\" if",
"pm * MB stat = os.stat(self.db_file) file_size = stat.st_size allocated_size",
"deletes rows from incoming_sync and outgoing_sync where the updated_at timestamp",
"utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod def validate_metadata(metadata):",
"as conn: conn.execute(''' DELETE FROM %s WHERE deleted = 1",
"if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count = 0",
"self.msg = msg def __str__(self): return 'DB connection error (%s,",
"**kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs)",
":param old: hex representation of the current DB hash :param",
"50m after. \"\"\" if not DB_PREALLOCATION or self.db_file == ':memory:':",
"\"\"\"Encapsulates working with a database.\"\"\" self.conn = None self.db_file =",
"if the database is considered deleted :param conn: database conn",
"point in prealloc_points(): if file_size <= point - MB /",
"are marked deleted and whose created_at timestamp is < age_timestamp.",
"put_timestamp, delete_timestamp): \"\"\" Used in replication to handle updating timestamps.",
"table. Only modifies status_changed_at if the timestamp is greater than",
"or self.db_file == ':memory:': return MB = (1024 * 1024)",
"\"\"\" % self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp",
"#查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in metadata_updates.items(): if timestamp >",
"if not self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): \"\"\" Unmarshall",
"the next boundary. Boundaries are 2m, 5m, 10m, 25m, 50m,",
"conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp',",
"{'remote_id', 'sync_point'} \"\"\" with self.get() as conn: curs = conn.execute('''",
"prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self):",
"\"\"\" try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection,",
"removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if",
"if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): try:",
"Updates the metadata dict for the database. The metadata dict",
"value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?'",
"be deleted, False otherwise \"\"\" raise NotImplementedError() def is_deleted(self): \"\"\"",
"\"\"\" if self.db_file != ':memory:' and not os.path.exists(self.db_file): return True",
"of the sync table. :param incoming: if True, get the",
"parts # of the system were \"racing\" each other. raise",
"them to merge_items(). Assume that lock_parent_directory has already been called.",
"put_timestamp < ?' % self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self,",
"\"\"\" if name is None: raise Exception('name is None!') new",
"gettext_ as _ from tempfile import mkstemp from eventlet import",
"row[0] if row else -1 def get_replication_info(self): \"\"\" Get information",
"put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn,",
"_db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler",
":param metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT or",
"if 'no such column: metadata' not in str(err): raise return",
"timeout=0): self.path = path self.timeout = timeout self.msg = msg",
"if it doesn't exist :returns: DB connection object \"\"\" try:",
"(created_at, put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time())",
"whose created_at timestamp is < age_timestamp. Also deletes rows from",
"self.pending_timeout = pending_timeout or 10 self.stale_reads_ok = stale_reads_ok self.db_dir =",
"the database. This should be called after an rsync. :param",
":returns: True if the DB is considered to be deleted,",
"'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError",
"self.path = path def __str__(self): return 'DB %s already exists'",
"as a rolling, order-independent hash of the contents. (check +",
"< sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim` method.",
"under broker to a human. The baseline implementation returns a",
"update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates the metadata dict for the",
"%s WHERE deleted = 1 AND %s < ? '''",
"as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF')",
"= sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly does",
"else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\" The idea",
"and fail when connect creates the db file stat =",
"< prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def",
"in sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES",
"\"\"\" Update the status_changed_at field in the stat table. Only",
"dict into the format this service uses for pending pickles.",
"conn: conn.execute(''' DELETE FROM %s WHERE deleted = 1 AND",
"SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END;",
"or 10 self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout =",
"= get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)",
"as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's",
"sync_timestamp): \"\"\" Delete rows from the db_contains_type table that are",
"#如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in metadata_updates.items():",
"it to :param:item_list. This is implemented by a particular broker",
"expanding db. If it gets within 512k of a boundary,",
"be overwritten if the timestamp is newer. To delete a",
"sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler that plays well with",
"as conn: row = conn.execute(query).fetchone() return row[0] if row else",
"put_timestamp=None, storage_policy_index=None): \"\"\" Create the DB The storage_policy_index is passed",
"%s_stat SET id=? ''' % self.db_type, (str(uuid4()),)) row = conn.execute('''",
"quar_path = os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir,",
"and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created",
"self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'),",
"except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager",
"WHERE ROWID = new.ROWID; END; \"\"\") if not put_timestamp: put_timestamp",
"meta_size + len(key) + len(value) if meta_count > MAX_META_COUNT: raise",
"= [] for key, (value, value_timestamp) in md.items(): if value",
"CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync",
"0 try: pending_size = os.path.getsize(self.pending_file) except OSError as err: if",
"self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp) in",
"DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn =",
"if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError as err:",
"if timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with",
"25, 50): yield pm * MB while True: pm +=",
"# Colons aren't used in base64 encoding; so they are",
"get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd)",
"delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp, delete_timestamp)) if old_status",
"{} return metadata @staticmethod def validate_metadata(metadata): \"\"\" Validates that metadata_falls",
"def _commit_puts_stale_ok(self): \"\"\" Catch failures of _commit_puts() if broker is",
"use this file except in compliance with the License. #",
"or corrupted database). If not, the original exception will be",
"% \\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def",
"self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such column: metadata'",
"('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self):",
"that key was set to that value. \"\"\" metadata =",
"* 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly error messages for",
"DB required for replication. :returns: dict containing keys from get_info",
"\"\"\" Used in replication to handle updating timestamps. :param created_at:",
"to handle updating timestamps. :param created_at: create timestamp :param put_timestamp:",
"BY ROWID ASC LIMIT ? ''' % self.db_contains_type, (start, count))",
"md: md = json.loads(md) keys_to_delete = [] for key, (value,",
"( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0",
"governing permissions and # limitations under the License. \"\"\" Database",
"metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError",
"MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): \"\"\" Updates the metadata dict",
"is greater than the current timestamp. :param timestamp: internalized put",
"args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args, **kwargs): return _db_timeout(",
"deleted, False otherwise \"\"\" raise NotImplementedError() def is_deleted(self): \"\"\" Check",
"reclaim metadata within. :param timestamp: Empty metadata items last updated",
"from eventlet import sleep, Timeout import sqlite3 from swift.common.constraints import",
"FROM %s ORDER BY ROWID DESC LIMIT 1 ''' %",
"= conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming'",
"_update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?'",
"= conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md)",
"self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise",
"MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count = 0 meta_size = 0",
"timestamp: internalized timestamp of the new record :returns: a hex",
"traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object): \"\"\"Encapsulates working with a",
"INTO %s_sync (sync_point, remote_id) VALUES (?, ?) ''' % ('incoming'",
"json.loads(md) keys_to_delete = [] for key, (value, value_timestamp) in md.items():",
"if self.db_file == ':memory:': tmp_db_file = None conn = get_db_connection(self.db_file,",
"50 yield pm * MB stat = os.stat(self.db_file) file_size =",
"database. The metadata dict values are tuples of (value, timestamp)",
"incoming sync table. :param sync_points: list of sync points where",
"value. Key/values will only be overwritten if the timestamp is",
"messages for DB Errors.\"\"\" def __init__(self, path, msg, timeout=0): self.path",
"and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB",
"str conn.executescript(\"\"\" CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point",
"timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理",
"and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn:",
"= -1 def __str__(self): \"\"\" Returns a string identifying the",
"tempfile import mkstemp from eventlet import sleep, Timeout import sqlite3",
"timestamp). These empty keys will eventually be removed by :func:`reclaim`",
"%s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) conn.commit() def",
"is None: cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self):",
"object \"\"\" try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False,",
"delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal)",
"database') % \\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager",
"\"\"\" if 'database disk image is malformed' in str(exc_value): exc_hint",
"created as a worker to limit transactions and commits from",
"when that key was set to that value. \"\"\" metadata",
"for containers \"\"\" if self.db_file == ':memory:': tmp_db_file = None",
"True, get the last incoming sync, otherwise get the last",
"conn = self.conn self.conn = None try: yield conn conn.rollback()",
"will be made to preallocate disk space for database files.",
"timestamp.internal) conn.commit() def get_items_since(self, start, count): \"\"\" Get a list",
"from get_info plus max_row and metadata Note:: get_info's <db_contains_type>_count is",
"return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") #对数据库父目录加锁",
"plays well with eventlet.\"\"\" def __init__(self, database, timeout=None, *args, **kwargs):",
"for .pending files and commit the found records by feeding",
"timestamp) self.update_metadata(cleared_meta) # then mark the db as deleted with",
"conn: row = conn.execute(query).fetchone() return row[0] if row else -1",
"#到这里,就是存在需要更新的元数据 with self.get() as conn: try: md = conn.execute('SELECT metadata",
"for k in metadata if isinstance(k, unicode)] for k in",
"in compliance with the License. # You may obtain a",
"software # distributed under the License is distributed on an",
"yield pm * MB stat = os.stat(self.db_file) file_size = stat.st_size",
"?) ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id']))",
"conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try: yield True except (Exception,",
"GreenDBConnection(sqlite3.Connection): \"\"\"SQLite DB Connection handler that plays well with eventlet.\"\"\"",
"= conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md",
"lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM",
"int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try: metadata =",
"Check if the DB is considered to be deleted. :returns:",
"MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large;",
"Used in replication to handle updating timestamps. :param created_at: create",
"hex representation of the new hash value \"\"\" if name",
"_sync tables. if 'no such column: updated_at' not in str(err):",
"orig_isolation_level self.conn = conn except (Exception, Timeout): logging.exception( _('Broker error",
"= None self.db_file = db_file self.pending_file = self.db_file + '.pending'",
"raise sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError):",
"points where a sync point is a dict of {'sync_point',",
"'remote_id'} :param incoming: if True, get the last incoming sync,",
"path != ':memory:' and not okay_to_create: # attempt to detect",
"PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8') if isinstance(s, unicode)",
"of {'remote_id', 'sync_point'} \"\"\" with self.get() as conn: curs =",
"we # pick fast, unsafe options here and do a",
"ASC LIMIT ? ''' % self.db_contains_type, (start, count)) curs.row_factory =",
"True except sqlite3.OperationalError as err: if 'no such column: metadata'",
"SET id=? ''' % self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT",
"an expanding db. If it gets within 512k of a",
":param sync_timestamp: max update_at timestamp of sync rows to delete",
"to %s due to %s database') % \\ (self.db_dir, quar_path,",
"try: os.ftruncate(fp.fileno(), 0) except OSError as err: if err.errno !=",
"if incoming else 'outgoing'), (id,)).fetchone() if not row: return -1",
"meta_count + 1 meta_size = meta_size + len(key) + len(value)",
"to a database. This is vital for useful diagnostics. \"\"\"",
"with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp,",
"rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE",
"Update the status_changed_at field in the stat table. Only modifies",
"insert or delete. This serves as a rolling, order-independent hash",
"ROWID :param count: number to get :returns: list of objects",
"fp: for entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry)",
"False def update_put_timestamp(self, timestamp): \"\"\" Update the put_timestamp. Only modifies",
"return [r for r in curs] def get_sync(self, id, incoming=True):",
"delete_db(self, timestamp): \"\"\" Mark the DB as deleted :param timestamp:",
"considered to be deleted. :returns: True if the DB is",
"%s database') % \\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail)",
"%s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT",
"k in metadata if isinstance(k, unicode)] for k in uni_keys:",
"* from %s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone()",
"errno.ENOTEMPTY): raise quar_path = \"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path,",
"%s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md else {}",
"self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start,",
"if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") #对数据库父目录加锁 with",
"with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as if there",
"a server from the sync table. :param id: remote ID",
"in metadata.items(): key = key.lower() if value != '' and",
"required for containers \"\"\" if self.db_file == ':memory:': tmp_db_file =",
"if isinstance(s, unicode) else s) for s in args] def",
"empty metadata values older than the timestamp using the given",
"addition to .pending \"\"\" if self.db_file == ':memory:' or not",
"the DB The storage_policy_index is passed through to the subclass's",
"return sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout( self.timeout, self.db_file, lambda:",
"marked deleted and whose created_at timestamp is < age_timestamp. Also",
"{'sync_point', 'remote_id'} :param incoming: if True, get the last incoming",
"Unmarshall the :param:entry and append it to :param:item_list. This is",
"args] def utf8encodekeys(metadata): uni_keys = [k for k in metadata",
"trying to rollback locked connection')) conn.close() def newid(self, remote_id): \"\"\"",
"% ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def",
"return False def update_put_timestamp(self, timestamp): \"\"\" Update the put_timestamp. Only",
"DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object): \"\"\"Encapsulates working with",
"with a database.\"\"\" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None,",
"the new record :returns: a hex representation of the new",
"try: renamer(self.db_dir, quar_path, fsync=False) except OSError as e: if e.errno",
"for entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except",
"%s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone() #在数据库中添加一条记录 def",
"deleted :param timestamp: internalized delete timestamp \"\"\" # first, clear",
"timestamp will be removed. :returns: True if conn.commit() should be",
"outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID;",
"WHERE deleted = 1 AND %s < ? ''' %",
"removed. :returns: True if conn.commit() should be called \"\"\" try:",
"plus max_row and metadata Note:: get_info's <db_contains_type>_count is translated to",
"self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn:",
"they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush()",
"'x-container-meta-' key = key[len(prefix):] meta_count = meta_count + 1 meta_size",
"old_metadata[key][1]: break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as conn:",
"with the License. # You may obtain a copy of",
"conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn except (Exception, Timeout):",
"def _preallocate(self): \"\"\" The idea is to allocate space in",
"return '%032x' % (int(old, 16) ^ int(new, 16)) def get_db_connection(path,",
"DB_PREALLOCATION = False #: Timeout for trying to connect to",
"if row else -1 def get_replication_info(self): \"\"\" Get information about",
"renamer(self.db_dir, quar_path, fsync=False) except OSError as e: if e.errno not",
"UPDATE %s_stat SET id=? ''' % self.db_type, (str(uuid4()),)) row =",
"ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp of initial PUT",
"keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del md[key] conn.execute('UPDATE",
"%s ORDER BY ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone()",
"raise Exception('name is None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest()",
"% self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err: if",
"self.pending_file = self.db_file + '.pending' self.pending_timeout = pending_timeout or 10",
"count)) curs.row_factory = dict_factory return [r for r in curs]",
"(sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn",
"except sqlite3.OperationalError as e: if 'locked' not in str(e): raise",
"the :param:entry and append it to :param:item_list. This is implemented",
"'no such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn,",
"info = self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] =",
"a particular broker to be compatible with its :func:`merge_items`. \"\"\"",
"TEXT DEFAULT '' \"\"\" % self.db_type) md = {} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据",
"\"\"\" if self.db_file == ':memory:': tmp_db_file = None conn =",
"max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total",
"self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with",
"for rec in sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point,",
"the db file stat = os.stat(path) if stat.st_size == 0",
"created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at,",
"six.moves.cPickle as pickle from swift import gettext_ as _ from",
"connect to a DB BROKER_TIMEOUT = 25 #: Pickle protocol",
"preallocate disk space for database files. DB_PREALLOCATION = False #:",
"raise metadata = '' return metadata @property def metadata(self): \"\"\"",
"self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else:",
"MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import json, Timestamp, renamer, \\ mkdirs,",
"incoming=True): \"\"\" Get a serialized copy of the sync table.",
"except in compliance with the License. # You may obtain",
"with self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat",
"key was set to that value. Key/values will only be",
"import HTTPBadRequest #: Whether calls will be made to preallocate",
"':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError,",
"\"\"\" if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout):",
"self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn = conn def",
"= os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir))",
"= json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError as",
"if the DB is considered to be deleted, False otherwise",
"last outgoing sync :returns: list of {'remote_id', 'sync_point'} \"\"\" with",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"any empty metadata values older than the timestamp using the",
"mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from swift.common.swob import",
"call() except sqlite3.OperationalError as e: if 'locked' not in str(e):",
"human. The baseline implementation returns a full pathname to a",
"the last outgoing sync :returns: list of {'remote_id', 'sync_point'} \"\"\"",
"DB is considered to be deleted. :returns: True if the",
"def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates",
"does a lot of transactions, so we # pick fast,",
"created by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with",
"== '%s' LIMIT 1 ''' % (self.db_contains_type) with self.get() as",
"if the DB is considered to be deleted. :returns: True",
"max_row and metadata Note:: get_info's <db_contains_type>_count is translated to just",
".pending \"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return",
"delete timestamp \"\"\" # first, clear the metadata cleared_meta =",
"= conn.execute(''' SELECT * FROM %s WHERE ROWID > ?",
"timeout=None, *args, **kwargs): if timeout is None: timeout = BROKER_TIMEOUT",
"connection.\"\"\" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file):",
"for DB Errors.\"\"\" def __init__(self, path, msg, timeout=0): self.path =",
"first, clear the metadata cleared_meta = {} for k in",
"info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] =",
"id: remote ID to get the sync_point for :param incoming:",
"connection object \"\"\" try: connect_time = time.time() conn = sqlite3.connect(path,",
"== ':memory:': return MB = (1024 * 1024) def prealloc_points():",
"metadata items last updated before this timestamp will be removed.",
"not self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): \"\"\" Unmarshall the",
"import mkstemp from eventlet import sleep, Timeout import sqlite3 from",
"otherwise \"\"\" if self.db_file != ':memory:' and not os.path.exists(self.db_file): return",
"from incoming_sync and outgoing_sync where the updated_at timestamp is <",
"conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): \"\"\" Checks the exception",
"for key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata",
"updated_at TEXT DEFAULT 0 ); CREATE TABLE incoming_sync ( remote_id",
"1 meta_size = meta_size + len(key) + len(value) if meta_count",
"to a DB BROKER_TIMEOUT = 25 #: Pickle protocol to",
"hashlib import logging import os from uuid import uuid4 import",
"is intended for reading of stats, and thus does not",
"dict for the database. The metadata dict values are tuples",
"row[idx]) for idx, col in enumerate(crs.description))) def chexor(old, name, timestamp):",
"DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),))",
"%s_stat SET status_changed_at = ?' ' WHERE status_changed_at < ?'",
"exc_type, exc_value, exc_traceback prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path",
"# It's as if there was a \"condition\" where different",
"to rollback locked connection')) conn.close() def newid(self, remote_id): \"\"\" Re-id",
"== '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for",
"except OSError as err: if err.errno != errno.ENOENT: raise if",
"with self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp =",
"= 25 #: Pickle protocol to use PICKLE_PROTOCOL = 2",
"except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\")",
"conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?,",
"the sync point, or -1 if the id doesn't exist.",
"fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try:",
"import uuid4 import sys import time import errno import six.moves.cPickle",
"raised indicating the action taken. \"\"\" if 'database disk image",
"(rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): \"\"\" The idea is to",
"if it is greater than the current timestamp. :param timestamp:",
"conn): # Override for additional work when receiving an rsynced",
"self.get() as conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat'",
"returns a full pathname to a database. This is vital",
"new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old,",
"sync_point=max(?, sync_point) WHERE remote_id=? ''' % ('incoming' if incoming else",
"with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes",
"so, the database will be quarantined and a new sqlite3.DatabaseError",
"%s):\\n%s' % ( self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly",
"pending_timeout=None, stale_reads_ok=False): \"\"\"Encapsulates working with a database.\"\"\" self.conn = None",
"taken. \"\"\" if 'database disk image is malformed' in str(exc_value):",
"info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs =",
"fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): \"\"\" Scan",
"< ?' % self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp):",
"if not row: return -1 return row['sync_point'] def get_syncs(self, incoming=True):",
"''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs didn't",
"If not, the original exception will be reraised. If so,",
"def _newid(self, conn): # Override for additional work when receiving",
"considered deleted :param conn: database conn :returns: True if the",
"not DB_PREALLOCATION or self.db_file == ':memory:': return MB = (1024",
"swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import json, Timestamp, renamer,",
"? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM",
"raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size",
"sync, otherwise get the last outgoing sync :returns: the sync",
"metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else:",
"timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del md[key]",
"raw string. \"\"\" info = self.get_info() info['count'] = info.pop('%s_count' %",
"\"with\" statement; locks a database.\"\"\" if not self.conn: if self.db_file",
"of sync points with the incoming sync table. :param sync_points:",
"with self.get() as conn: row = conn.execute( \"SELECT sync_point FROM",
"check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not okay_to_create:",
"= self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据 for key, (value, timestamp)",
"def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait = 0.001",
"is ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp of initial",
"with open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'): if",
"the sync table. :param incoming: if True, get the last",
"CONDITIONS OF ANY KIND, either express or # implied. #",
"incoming_sync and outgoing_sync where the updated_at timestamp is < sync_timestamp.",
"commits from other related functions. :param conn: Database connection to",
"if 'no such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self,",
"record): \"\"\" Turn this db record dict into the format",
"to be deleted, False otherwise \"\"\" raise NotImplementedError() def is_deleted(self):",
"the last outgoing sync :returns: the sync point, or -1",
"sqlite3.OperationalError as e: if 'locked' not in str(e): raise sleep(retry_wait)",
"or not os.path.exists(self.pending_file): return if item_list is None: item_list =",
"sync, otherwise get the last outgoing sync :returns: list of",
"exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): \"\"\"Use with the",
"return curs.fetchone() #在数据库中添加一条记录 def put_record(self, record): if self.db_file == ':memory:':",
"(str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM %s ORDER BY",
"files and commit the found records by feeding them to",
"self.db_type, (created_at, put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp =",
"'NOW') WHERE ROWID = new.ROWID; END; \"\"\") if not put_timestamp:",
"if broker is intended for reading of stats, and thus",
"not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") #对数据库父目录加锁 with lock_parent_directory(self.pending_file,",
"''' % self.db_contains_type).fetchone() sync_point = row['ROWID'] if row else -1",
"s) for s in args] def utf8encodekeys(metadata): uni_keys = [k",
"the raw string. \"\"\" info = self.get_info() info['count'] = info.pop('%s_count'",
"with self.get() as conn: curs = conn.execute(''' SELECT remote_id, sync_point",
"info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return",
"only required for containers \"\"\" if self.db_file == ':memory:': tmp_db_file",
"count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode =",
"creating dbs implicitly does a lot of transactions, so we",
"with the \"with\" statement; returns a database connection.\"\"\" if not",
"timestamp): \"\"\" Mark the DB as deleted :param timestamp: internalized",
"newid(self, remote_id): \"\"\" Re-id the database. This should be called",
"\"\"\" return dict( ((col[0], row[idx]) for idx, col in enumerate(crs.description)))",
"exc_value, exc_traceback): \"\"\" Checks the exception info to see if",
"128-bit hash on insert or delete. This serves as a",
":func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)): #查询时间戳,由于网络存在乱序,所以,只更新时间最后请求的元数据",
"for pm in (1, 2, 5, 10, 25, 50): yield",
"be deleted, False otherwise \"\"\" if self.db_file != ':memory:' and",
"new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN",
"but will instead return True if the database needs committing.",
"Get a serialized copy of the sync table. :param incoming:",
"closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes =",
"of sync points where a sync point is a dict",
"def get_syncs(self, incoming=True): \"\"\" Get a serialized copy of the",
"= path self.timeout = timeout self.msg = msg def __str__(self):",
"# attempt to detect and fail when connect creates the",
"= sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and",
"self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs)) def dict_factory(crs, row): \"\"\"",
"!= errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中",
"as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY')",
"file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str",
"full pathname to a database. This is vital for useful",
">= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by connect?')",
"This should be called after an rsync. :param remote_id: the",
":param sync_points: list of sync points where a sync point",
"else: metadata = {} return metadata @staticmethod def validate_metadata(metadata): \"\"\"",
"well with eventlet.\"\"\" def __init__(self, database, timeout=None, *args, **kwargs): if",
"errno import six.moves.cPickle as pickle from swift import gettext_ as",
"Returns the metadata dict for the database. The metadata dict",
"conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): \"\"\"",
"new record :returns: a hex representation of the new hash",
"with self.get() as conn: try: metadata = conn.execute('SELECT metadata FROM",
"options here and do a big fsync at the end.",
"entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except Exception:",
"not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn: return",
"= self.conn self.conn = None try: yield conn conn.rollback() self.conn",
"{} #遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items(): value, timestamp =",
"self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly error messages for DB",
"time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path !=",
"updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: #",
"\"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try:",
"a string identifying the entity under broker to a human.",
"timestamp :param delete_timestamp: delete timestamp \"\"\" with self.get() as conn:",
"timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count):",
"self.timeout) else: self.conn = conn def delete_db(self, timestamp): \"\"\" Mark",
"metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE",
"(Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn =",
"= msg def __str__(self): return 'DB connection error (%s, %s):\\n%s'",
"conn: row = conn.execute(''' UPDATE %s_stat SET id=? ''' %",
"prealloc_points(): if file_size <= point - MB / 2: prealloc_size",
"get_info plus max_row and metadata Note:: get_info's <db_contains_type>_count is translated",
"TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync SET",
"timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE",
"if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for",
"conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md =",
"of {'sync_point', 'remote_id'} :param incoming: if True, get the last",
"\"DB doesn't exist\") conn = self.conn self.conn = None orig_isolation_level",
"cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout(",
"self).__init__(*args, **kwargs) def execute(self, *args, **kwargs): return _db_timeout( self.timeout, self.db_file,",
"? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs",
"will be raised indicating the action taken. \"\"\" if 'database",
"remote_id) VALUES (?, ?) ''' % ('incoming' if incoming else",
"incoming sync, otherwise get the last outgoing sync \"\"\" with",
"metadata within. :param timestamp: Empty metadata items last updated before",
"self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\")",
"dict containing keys from get_info plus max_row and metadata Note::",
"break else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as conn: try:",
"WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync",
"(id,)).fetchone() if not row: return -1 return row['sync_point'] def get_syncs(self,",
"conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno())",
"encrypted or is not a database' in str(exc_value): exc_hint =",
"conn except sqlite3.DatabaseError: try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except",
"okay_to_create: # attempt to detect and fail when connect creates",
"by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata #如果新添加的元数据是原来元数据的子集 if set(metadata_updates).issubset(set(old_metadata)):",
"database.\"\"\" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False):",
"request :param storage_policy_index: only required for containers \"\"\" if self.db_file",
"k in self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then",
"(timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): \"\"\" Update the status_changed_at",
"to limit transactions and commits from other related functions. :param",
"self.account = account self.container = container self._db_version = -1 def",
"in md.items(): if value == '' and value_timestamp < timestamp:",
"json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError as err:",
"_is_deleted(self, conn): \"\"\" Check if the database is considered deleted",
"metadata = ?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError",
"''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self, conn): # Override",
"except sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn",
"= '' return metadata @property def metadata(self): \"\"\" Returns the",
"created_at: create timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete",
"__init__(self, database, timeout=None, *args, **kwargs): if timeout is None: timeout",
"not in str(err): raise metadata = '' return metadata @property",
"metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return",
"age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): \"\"\" Removes any empty",
"conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE",
"conn.execute( 'UPDATE %s_stat SET put_timestamp = ?' ' WHERE put_timestamp",
"eventlet.\"\"\" def __init__(self, database, timeout=None, *args, **kwargs): if timeout is",
"to see if it indicates a quarantine situation (malformed or",
"on insert or delete. This serves as a rolling, order-independent",
"quar_path = \"%s-%s\" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail",
"baseline implementation returns a full pathname to a database. This",
"as fp: # Colons aren't used in base64 encoding; so",
"set its value to ('', timestamp). These empty keys will",
"< ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old",
"indicates when that key was set to that value. \"\"\"",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m",
"self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp = ?'",
"0.05) class DatabaseConnectionError(sqlite3.DatabaseError): \"\"\"More friendly error messages for DB Errors.\"\"\"",
"STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update",
"modifies it if it is greater than the current timestamp.",
"Delete rows from the db_contains_type table that are marked deleted",
"= db_file self.pending_file = self.db_file + '.pending' self.pending_timeout = pending_timeout",
"row else -1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point,",
"value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-'",
"sync_point FROM %s_sync ''' % ('incoming' if incoming else 'outgoing'))",
"indicates when that key was set to that value. Key/values",
"SET sync_point=max(?, sync_point) WHERE remote_id=? ''' % ('incoming' if incoming",
"a key, set its value to ('', timestamp). These empty",
"already been called. :param item_list: A list of items to",
"DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn = self.conn self.conn = None",
"and not okay_to_create: # attempt to detect and fail when",
"BY ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point =",
"the DB if it doesn't exist :returns: DB connection object",
"self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): \"\"\" Checks",
"= dict_factory return curs.fetchone() #在数据库中添加一条记录 def put_record(self, record): if self.db_file",
"import sys import time import errno import six.moves.cPickle as pickle",
"unicode)] for k in uni_keys: sv = metadata[k] del metadata[k]",
"FROM %s WHERE deleted = 1 AND %s < ?",
"raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn = self.conn self.conn =",
"('incoming' if incoming else 'outgoing')) result = [] for row",
"pending updates. \"\"\" if self.db_file == ':memory:' or not os.path.exists(self.pending_file):",
"Timeout): logging.exception( _('Broker error trying to rollback locked connection')) conn.close()",
"temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor)",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or #",
"'a+b') as fp: # Colons aren't used in base64 encoding;",
"the conn, but will instead return True if the database",
"timeout=timeout) if path != ':memory:' and not okay_to_create: # attempt",
"boundary, it allocates to the next boundary. Boundaries are 2m,",
"a database. This is vital for useful diagnostics. \"\"\" return",
"Version 2.0 (the \"License\"); # you may not use this",
"db. pass def _is_deleted(self, conn): \"\"\" Check if the database",
"= ('', timestamp) self.update_metadata(cleared_meta) # then mark the db as",
"get the last outgoing sync :returns: the sync point, or",
"Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry':",
"newer. To delete a key, set its value to ('',",
"('', timestamp) self.update_metadata(cleared_meta) # then mark the db as deleted",
"status_changed_at field in the stat table. Only modifies status_changed_at if",
"'corrupted' elif 'disk I/O error' in str(exc_value): exc_hint = 'disk",
"END; CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE",
"= key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size",
"def commit(self): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor):",
"the current status_changed_at timestamp. :param timestamp: internalized timestamp \"\"\" with",
"as conn: row = conn.execute(''' UPDATE %s_stat SET id=? '''",
"and metadata is the raw string. \"\"\" info = self.get_info()",
"is malformed' in str(exc_value): exc_hint = 'malformed' elif 'file is",
"a boundary, it allocates to the next boundary. Boundaries are",
"MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE)",
"def chexor(old, name, timestamp): \"\"\" Each entry in the account",
"conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' ' WHERE status_changed_at",
"old: hex representation of the current DB hash :param name:",
"DatabaseAlreadyExists(sqlite3.DatabaseError): \"\"\"More friendly error messages for DB Errors.\"\"\" def __init__(self,",
"**kwargs): if timeout is None: timeout = BROKER_TIMEOUT self.timeout =",
"where a sync point is a dict of {'sync_point', 'remote_id'}",
"and container databases is XORed by the 128-bit hash on",
"to detect and fail when connect creates the db file",
"'s', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError as e:",
"% self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM %s",
"fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid",
"%s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete =",
"key, set its value to ('', timestamp). These empty keys",
"get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT *",
"inserted :param timestamp: internalized timestamp of the new record :returns:",
"by applicable law or agreed to in writing, software #",
"get_db_connection(self.db_file, self.timeout) else: self.conn = conn def delete_db(self, timestamp): \"\"\"",
"'UPDATE %s_stat SET status_changed_at = ?' ' WHERE status_changed_at <",
"?' ' WHERE put_timestamp < ?' % self.db_type, (timestamp, timestamp))",
"None self.db_file = db_file self.pending_file = self.db_file + '.pending' self.pending_timeout",
"self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r for r",
"self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file) except OSError",
"self.get() as conn: conn.execute(''' DELETE FROM %s WHERE deleted =",
"updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; \"\"\")",
"OR CONDITIONS OF ANY KIND, either express or # implied.",
"'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count",
"column: metadata' not in str(err): raise conn.execute(\"\"\" ALTER TABLE %s_stat",
"path: path to DB :param timeout: timeout for connection :param",
"HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded \"\"\" meta_count =",
"def make_tuple_for_pickle(self, record): \"\"\" Turn this db record dict into",
"the database needs committing. This function was created as a",
"return call() except sqlite3.OperationalError as e: if 'locked' not in",
"the DB is considered to be deleted. :returns: True if",
"count: number to get :returns: list of objects between start",
"> MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' %",
"DB as deleted :param timestamp: internalized delete timestamp \"\"\" #",
"#遍历待更新的所有元数据,只更新不存在的元数据或时间戳最新的元数据 for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp",
"Foundation # # Licensed under the Apache License, Version 2.0",
"else -1 def get_replication_info(self): \"\"\" Get information about the DB",
"in the _sync tables. if 'no such column: updated_at' not",
"the action taken. \"\"\" if 'database disk image is malformed'",
"after an rsync. :param remote_id: the ID of the remote",
"= sqlite3.Row conn.text_factory = str conn.executescript(\"\"\" CREATE TABLE outgoing_sync (",
"err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record]) else:",
"self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger = logger or",
"renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined %s to %s due",
"conn.isolation_level = orig_isolation_level self.conn = conn except (Exception, Timeout): logging.exception(",
"\"\"\"SQLite DB Connection handler that plays well with eventlet.\"\"\" def",
"self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): \"\"\" Unmarshall the :param:entry",
"if key not in md or timestamp > md[key][1]: md[key]",
"aren't used in base64 encoding; so they are our #",
":param put_timestamp: put timestamp :param delete_timestamp: delete timestamp \"\"\" with",
"Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn",
"account self.container = container self._db_version = -1 def __str__(self): \"\"\"",
"serialized copy of the sync table. :param incoming: if True,",
"metadata items; max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE:",
"sync_point = row['ROWID'] if row else -1 conn.execute(''' INSERT OR",
"NotImplementedError def merge_syncs(self, sync_points, incoming=True): \"\"\" Merge a list of",
"except (Exception, Timeout): conn.close() raise @contextmanager def lock(self): \"\"\"Use with",
"This function will not call commit on the conn, but",
"len(key) + len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many",
"conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err:",
"is a dict of {'sync_point', 'remote_id'} :param incoming: if True,",
"ROWID = new.ROWID; END; \"\"\") if not put_timestamp: put_timestamp =",
"def __init__(self, path, msg, timeout=0): self.path = path self.timeout =",
"utf8encode(*args): return [(s.encode('utf8') if isinstance(s, unicode) else s) for s",
"applicable law or agreed to in writing, software # distributed",
"get_db_connection(path, timeout=30, okay_to_create=False): \"\"\" Returns a properly configured SQLite database",
"than the current status_changed_at timestamp. :param timestamp: internalized timestamp \"\"\"",
"_preallocate(self): \"\"\" The idea is to allocate space in front",
"row['sync_point'] def get_syncs(self, incoming=True): \"\"\" Get a serialized copy of",
"timestamp \"\"\" # first, clear the metadata cleared_meta = {}",
"self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file,",
":param remote_id: the ID of the remote database being rsynced",
"\"\"\" Check if the database is considered deleted :param conn:",
"except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn",
"overwritten if the timestamp is newer. To delete a key,",
"= value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata =",
"'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except",
"False otherwise \"\"\" if self.db_file != ':memory:' and not os.path.exists(self.db_file):",
"new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN",
"not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if",
"when connect creates the db file stat = os.stat(path) if",
"self.get() as conn: row = conn.execute( \"SELECT sync_point FROM %s_sync",
"0) except OSError as err: if err.errno != errno.ENOENT: raise",
"last outgoing sync \"\"\" with self.get() as conn: for rec",
"this timestamp will be removed. :returns: True if conn.commit() should",
"internalized delete timestamp \"\"\" # first, clear the metadata cleared_meta",
"def get(self): \"\"\"Use with the \"with\" statement; returns a database",
"from contextlib import contextmanager, closing import hashlib import logging import",
"% self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r for",
"* 1024) def prealloc_points(): for pm in (1, 2, 5,",
"due to %s database') % \\ (self.db_dir, quar_path, exc_hint) self.logger.error(detail)",
"initial PUT request :param storage_policy_index: only required for containers \"\"\"",
"os.path.dirname(db_file) self.timeout = timeout self.logger = logger or logging.getLogger() self.account",
"= None try: yield conn conn.rollback() self.conn = conn except",
"timestamp using the given database connection. This function will not",
"eventually be removed by :func:`reclaim` \"\"\" #从数据库中查询元数据信息,生成字典格式,保存到old_metadata old_metadata = self.metadata",
"= os.path.dirname(db_file) self.timeout = timeout self.logger = logger or logging.getLogger()",
"metadata = ?' % self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp,",
"list of sync points where a sync point is a",
":param id: remote ID to get the sync_point for :param",
"!= '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if",
"# You may obtain a copy of the License at",
"delete_timestamp): \"\"\" Used in replication to handle updating timestamps. :param",
"10, 25, 50): yield pm * MB while True: pm",
"for key, (value, timestamp) in metadata.items(): key = key.lower() if",
"DB BROKER_TIMEOUT = 25 #: Pickle protocol to use PICKLE_PROTOCOL",
"DB The storage_policy_index is passed through to the subclass's ``_initialize``",
"fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try: metadata",
"quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): \"\"\"Use with",
"[] for row in curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return",
"to the subclass's ``_initialize`` method. It is ignored by ``AccountBroker``.",
"1 AND %s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,))",
"meta_count = meta_count + 1 meta_size = meta_size + len(key)",
"CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN UPDATE incoming_sync",
"2m, 5m, 10m, 25m, 50m, then every 50m after. \"\"\"",
"% MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too",
"sync table. :param incoming: if True, get the last incoming",
"with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if",
"FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' %",
"then mark the db as deleted with self.get() as conn:",
"= 0.001 while True: try: return call() except sqlite3.OperationalError as",
"file_size <= point - MB / 2: prealloc_size = point",
"is None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x'",
"(value, value_timestamp) in md.items(): if value == '' and value_timestamp",
"while True: pm += 50 yield pm * MB stat",
"needs committing. This function was created as a worker to",
"= timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs)",
"for database files. DB_PREALLOCATION = False #: Timeout for trying",
"= conn except sqlite3.DatabaseError: try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info())",
"from swift.common.exceptions import LockTimeout from swift.common.swob import HTTPBadRequest #: Whether",
"else: #所有的元数据均过期,则不作任何处理 return #到这里,就是存在需要更新的元数据 with self.get() as conn: try: md",
"def delete_db(self, timestamp): \"\"\" Mark the DB as deleted :param",
"be quarantined and a new sqlite3.DatabaseError will be raised indicating",
"outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM",
"sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param",
"else: raise DatabaseConnectionError(self.db_file, \"DB doesn't exist\") conn = self.conn self.conn",
"DB Connection handler that plays well with eventlet.\"\"\" def __init__(self,",
":param start: start ROWID :param count: number to get :returns:",
"= key.lower() if value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')):",
"True if the database needs committing. This function was created",
"The metadata dict values are tuples of (value, timestamp) where",
"as e: if 'locked' not in str(e): raise sleep(retry_wait) retry_wait",
"the format this service uses for pending pickles. \"\"\" raise",
"str(err): raise conn.execute(\"\"\" ALTER TABLE %s_stat ADD COLUMN metadata TEXT",
":returns: dict containing keys from get_info plus max_row and metadata",
"be called \"\"\" try: md = conn.execute('SELECT metadata FROM %s_stat'",
"results. \"\"\" return dict( ((col[0], row[idx]) for idx, col in",
"list of objects between start and end \"\"\" self._commit_puts_stale_ok() with",
"%s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such",
"the timestamp is newer. To delete a key, set its",
"self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value,",
"uuid import uuid4 import sys import time import errno import",
"os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn)",
"sync :returns: the sync point, or -1 if the id",
"True if conn.commit() should be called \"\"\" try: md =",
"list of items to commit in addition to .pending \"\"\"",
"% ('incoming' if incoming else 'outgoing')) result = [] for",
"hash :param name: name of the object or container being",
"is None: raise Exception('name is None!') new = hashlib.md5(('%s-%s' %",
"self.conn self.conn = None try: yield conn conn.rollback() self.conn =",
"self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file,",
"50): yield pm * MB while True: pm += 50",
"storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp:",
"raise HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE) def",
"statement; locks a database.\"\"\" if not self.conn: if self.db_file !=",
"object rows to delete :param sync_timestamp: max update_at timestamp of",
"'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER",
"db. If it gets within 512k of a boundary, it",
"self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM %s",
"CREATE TABLE incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at",
"dict_factory return [r for r in curs] def get_sync(self, id,",
"def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): \"\"\" Used in replication to",
"storage_policy_index=None): \"\"\" Create the DB The storage_policy_index is passed through",
"there was a \"condition\" where different parts # of the",
"= stat.st_size allocated_size = stat.st_blocks * 512 for point in",
"PENDING_CAP: self._commit_puts([record]) else: #将对象记录写入数据库文件中 with open(self.pending_file, 'a+b') as fp: #",
"\"License\"); # you may not use this file except in",
"class GreenDBCursor(sqlite3.Cursor): \"\"\"SQLite Cursor handler that plays well with eventlet.\"\"\"",
"old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?,",
"incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID;",
"put timestamp :param delete_timestamp: delete timestamp \"\"\" with self.get() as",
"25 #: Pickle protocol to use PICKLE_PROTOCOL = 2 #:",
"metadata_falls within acceptable limits. :param metadata: to be validated :raises:",
"the contents. (check + XOR) :param old: hex representation of",
"del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call): with",
"= False #: Timeout for trying to connect to a",
"at the end. with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous =",
"by a particular broker to be compatible with its :func:`merge_items`.",
"from swift.common.swob import HTTPBadRequest #: Whether calls will be made",
"fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): \"\"\" Scan for",
"in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn,",
"if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit()",
"when you need a real dict, i.e. when you're going",
"-1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES",
"metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call):",
"a database.\"\"\" self.conn = None self.db_file = db_file self.pending_file =",
"os.path.exists(self.pending_file): return if item_list is None: item_list = [] self._preallocate()",
"delete :param sync_timestamp: max update_at timestamp of sync rows to",
"self.update_metadata(cleared_meta) # then mark the db as deleted with self.get()",
"renamer, \\ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from",
"= timeout self.msg = msg def __str__(self): return 'DB connection",
"Errors.\"\"\" def __init__(self, path): self.path = path def __str__(self): return",
"(self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): \"\"\"Use",
"metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file):",
"didn't have updated_at in the _sync tables. if 'no such",
"rollback locked connection')) conn.close() def newid(self, remote_id): \"\"\" Re-id the",
"r in curs] def get_sync(self, id, incoming=True): \"\"\" Gets the",
"Colons aren't used in base64 encoding; so they are our",
"timeout=30, okay_to_create=False): \"\"\" Returns a properly configured SQLite database connection.",
"else 'outgoing')) result = [] for row in curs: result.append({'remote_id':",
"limit transactions and commits from other related functions. :param conn:",
"call commit on the conn, but will instead return True",
"return True if the database needs committing. This function was",
"self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute(''' SELECT *",
"from tempfile import mkstemp from eventlet import sleep, Timeout import",
"is < sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim`",
"* FROM %s WHERE ROWID > ? ORDER BY ROWID",
"worker to limit transactions and commits from other related functions.",
"err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self): \"\"\" Catch failures of",
"raise def _commit_puts_stale_ok(self): \"\"\" Catch failures of _commit_puts() if broker"
"limit for mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN,",
"value, e.g. \"3\", \"3.5\"', default='0', ) #: Prop limit for",
"is not None def generate_enum_regex(self) -> str: \"\"\"Generate regex from",
"by unit pattern. Args: quantity_pattern (str): Pattern to match the",
"limit for any props requiring a positive integer such as",
"Pattern to match all accepted mass units, or empty string.",
"for that # quantity type as XDL app uses this",
"g\"', default='0 g' ) #: Prop limit for mol props.",
"+ r'))$', hint='Expecting number followed by standard volume units, e.g.",
"e.g. \"50 mbar\", \"1 atm\".', default='1013.25 mbar' ) #: Prop",
"number followed by standard time units, e.g. \"15 mins\", \"3",
"accepted time units, or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #:",
"regex for validating the input to a given prop. For",
"type is given and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit",
"enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ] ) #:",
"= self.generate_enum_hint() else: self.hint = hint # Otherwise just set",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. \"400",
"float value, e.g. \"3\", \"3.5\"', default='0', ) #: Prop limit",
"to match all accepted volumes units case insensitvely, or empty",
"followed by standard pressure units, e.g. \"50 mbar\", \"1 atm\".',",
"limit for volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' +",
"#: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all",
"mins\", \"3 hrs\".', default='0 secs' ) #: Prop limit for",
"limit is used to check that input supplied is valid",
"#: Prop limit for ``Separate`` ``purpose`` prop. One of 'extract'",
"rotation speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #:",
"Useful hint for what valid value should look like, e.g.",
"appropriate units are used or a value is within a",
"enum: raise ValueError( 'Either `regex` or `enum` argument must be",
"hint='Expecting RPM value, e.g. \"400 RPM\".', default='400 RPM', ) #:",
"units of the quantity involved, e.g. for volume, '0 mL'.",
"``enum``. Arguments: regex (str): Regex pattern that should match with",
"MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all accepted",
"'activating-agent' ] ) #: Prop limit for ``Component`` ``component_type`` prop.",
"SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop limit for ``Add``",
"or \"solvent\".', default='True' ) #: Prop limit for ``Separate`` ``purpose``",
"prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] )",
"'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent',",
"type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive",
"#: prop type is given and prop type is ``float``.",
"f'\"{item}\", ' s = s[:-2] + f' or \"{self.enum[-1]}\".' return",
"#: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit(",
"case as the #: value can be ``True``, ``False`` or",
"#: Prop limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit =",
"default='false', ) #: Prop limit for ``WashSolid`` ``stir`` prop. This",
"for ``Component`` ``component_type`` prop. One of 'reactor', #: 'filter', 'separator',",
"float. Used if no explicit #: prop type is given",
"'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop",
"default='0 g' ) #: Prop limit for mol props. MOL_PROP_LIMIT:",
"from 0-100 representing a percentage, e.g. \"50\", \"8.5\".', default='0', )",
"#: Pattern matching float between 0 and 100. Used for",
"to match all accepted pressure units, or empty string. PRESSURE_UNITS_PATTERN",
"\"\"\" return PropLimit( regex=r'^((' + quantity_pattern + r'[ ]?'\\ +",
"recommended, at least when using ``regex`` not ``enum``. Arguments: regex",
"that should match with valid values and not match with",
"accepted volumes units case insensitvely, or empty string. VOLUME_UNITS_PATTERN: str",
"for any props requiring a boolean value. Used if no",
"prop. One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top',",
"for that property. \"\"\" import re from typing import List,",
"+ r'|' regex = regex[:-1] + r')' return regex def",
"= s[:-2] + f' or \"{self.enum[-1]}\".' return s ################## #",
"all accepted temperature units, or empty string. TEMP_UNITS_PATTERN: str =",
"are used. hint (str): Hint for the prop limit to",
"volume units, e.g. \"5.5 mL\"', default='0 mL', ) #: Prop",
"'0.0' would all be matched by this #: pattern. FLOAT_PATTERN:",
"#: Pattern to match all accepted mass units, or empty",
"the prop limit to tell the user what correct input",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by mol",
"'10.3', '0.0' would all be matched by this pattern, but",
"prop type is given and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT:",
"One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom'])",
"a value is within a certain range. Either ``regex`` or",
"items in :py:attr:`enum`. Returns: str: Regex that will match any",
"PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ] )",
"-> str: \"\"\"Generate hint from :py:attr:`enum`. Hint will list all",
"Arguments: regex (str): Regex pattern that should match with valid",
"# Regex patterns # ################## #: Pattern to match a",
"the items in :py:attr:`enum`. Returns: str: Regex that will match",
"This is a special case as the #: value can",
"to tell the user what correct input should look like",
"r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean strings, specifically matching 'true'",
"units expected or empty string. Empty string is matched as",
"to match all accepted mass units, or empty string. MASS_UNITS_PATTERN:",
"any float between 10.000 and 99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)'",
"= r'(' for item in self.enum: regex += item +",
"units, or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to",
"' for item in self.enum[:-1]: s += f'\"{item}\", ' s",
"def validate(self, value: str) -> bool: \"\"\"Validate given value against",
"= r'(100(?:[.][0]+)?)' #: Pattern matching any float between 10.000 and",
"one of ' for item in self.enum[:-1]: s += f'\"{item}\",",
"'wash']) #: Prop limit for ``Separate`` ``product_phase`` prop. One of",
"all accepted mass units, or empty string. MASS_UNITS_PATTERN: str =",
"``enum`` must be given when instantiating. If ``enum`` is given",
"valid value should look like, e.g. \"Volume should be a",
"RPM', ) #: Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by mol or",
"will override whatever is given for ``regex`` and ``hint``. ``hint``",
"POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard pressure units, e.g.",
"regex = regex[:-1] + r')' return regex def generate_enum_hint(self) ->",
"#: Pattern to match all accepted rotation speed units, or",
"a positive float. Used if no explicit #: prop type",
"regex from :py:attr:`enum`. Regex will match any of the items",
"self.enum: regex += item + r'|' regex = regex[:-1] +",
"Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm', #:",
"= PropLimit(enum=['extract', 'wash']) #: Prop limit for ``Separate`` ``product_phase`` prop.",
"speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern",
"Empty string is matched as not including units is allowed",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400 nm\".',",
"unit for that # quantity type as XDL app uses",
"is within a certain range. Either ``regex`` or ``enum`` must",
"A prop limit is essentially a regex for validating the",
"Value to validate against prop limit. Returns: bool: True if",
"units for the prop involved. \"\"\" return PropLimit( regex=r'^((' +",
"not ``enum``. Arguments: regex (str): Regex pattern that should match",
"pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number",
"\"400 RPM\".', default='400 RPM', ) #: Prop limit for wavelength",
"add in default units. #: Prop limit for volume props.",
"list of allowed values. \"\"\" def __init__( self, regex: Optional[str]",
"be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'('",
"number followed by standard temperature units, e.g. \"25\", \"25°C\", \"298",
"mass units, e.g. \"2.3 g\"', default='0 g' ) #: Prop",
"and 99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any",
"if the value matches the prop limit, otherwise False. \"\"\"",
"(str): Pattern to match the units expected or empty string.",
"from this self.enum = enum if enum: if not regex:",
"#: Prop limit for temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"hint (str): Useful hint for what valid value should look",
"List, Optional class PropLimit(object): \"\"\"Convenience class for storing prop limit.",
"PropLimit( regex=r'^((' + quantity_pattern + r'[ ]?'\\ + units_pattern +",
"item in self.enum: regex += item + r'|' regex =",
"'-1', '1', '-10.3', '10.3', '0.0' would all be matched by",
"for mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting",
"'bottom']) #: Prop limit for ``Add`` ``purpose`` prop. One of",
"List of values that the prop can take. This is",
"between 0 and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit =",
"= generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius or",
"are both optional, but recommended, at least when using ``regex``",
"any props requiring a positive integer such as ``repeats``. #:",
"string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all accepted",
"empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all",
"= 'Expecting one of ' for item in self.enum[:-1]: s",
"by standard time units, e.g. \"15 mins\", \"3 hrs\".', default='0",
"for the prop limit, should use standard units for the",
"+ r'$)|(^' + quantity_pattern + r'))$', hint=hint, default=default ) #",
") #: Prop limit for ``HeatChill`` ``purpose`` prop. One of",
"#: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only",
"'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction',",
"all be matched by this #: pattern. FLOAT_PATTERN: str =",
"Prop limit for mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by standard time units, e.g.",
"ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to match all accepted length",
"#: Prop limit for mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"self.regex = self.generate_enum_regex() else: self.regex = regex if not hint:",
"props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed",
"'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify',",
"e.g. for volume, '0 mL'. enum (List[str]): List of values",
"prop. One of 'reactor', #: 'filter', 'separator', 'rotavap' or 'flask'.",
"Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option.",
"will list all items in :py:attr:`enum`. Returns: str: Hint listing",
"the standard unit for that # quantity type as XDL",
"standard units for the prop involved. \"\"\" return PropLimit( regex=r'^(('",
"string is matched as not including units is allowed as",
"(List[str]): List of values that the prop can take. This",
"ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. \"400 RPM\".', default='400 RPM', )",
"checking appropriate units are used or a value is within",
"_zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between 0",
"value (str): Value to validate against prop limit. Returns: bool:",
"list. \"\"\" regex = r'(' for item in self.enum: regex",
"hint='Expecting wavelength, e.g. \"400 nm\".', default='400 nm' ) #: Prop",
"or number followed by standard temperature units, e.g. \"25\", \"25°C\",",
"regex from the list of allowed values. \"\"\" def __init__(",
"for validating the input to a given prop. For example,",
"given and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit(",
"Pattern to match all accepted pressure units, or empty string.",
"= '', default: Optional[str] = '' ) -> PropLimit: \"\"\"",
"mbar\", \"1 atm\".', default='1013.25 mbar' ) #: Prop limit for",
"MASS_UNITS_PATTERN, hint='Expecting number followed by standard mass units, e.g. \"2.3",
"PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g. \"3\",",
"string. Empty string is matched as not including units is",
"'-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean",
"expected or empty string. Empty string is matched as not",
"= PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g. \"3\", \"3.5\"',",
"what valid value should look like, e.g. \"Volume should be",
"MOL_UNITS_PATTERN, hint='Expecting number followed by mol or mmol, e.g. \"2.3",
"PropLimit( r'^(' + _hundred_float + '|'\\ + _ten_to_ninety_nine_float + '|'",
"match the number expected. This will typically be ``POSITIVE_FLOAT_PATTERN`` or",
"\"25\", \"25°C\", \"298 K\".', default='25°C', ) #: Prop limit for",
"'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate',",
"PropLimit( r'(' + BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting",
"[], ): if not regex and not enum: raise ValueError(",
"float between 0 and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #:",
"hint='Expecting number followed by mol or mmol, e.g. \"2.3 mol\".',",
"valid for that property. \"\"\" import re from typing import",
"``enum`` is given it will override whatever is given for",
"number followed by volume units, e.g. '5 mL'.\" default (str):",
"explicit prop #: type is given and prop type is",
"by this pattern, but not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN:",
"hint='Expecting one of \"false\" or \"true\".', default='false', ) #: Prop",
"# ############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint: Optional[str]",
"in this case standard units are used. hint (str): Hint",
"self.hint = hint def validate(self, value: str) -> bool: \"\"\"Validate",
"to match all accepted rotation speed units, or empty string.",
"the :py:attr:`enum` list. \"\"\" regex = r'(' for item in",
"PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop limit for ``Add`` ``purpose``",
"value is within a certain range. Either ``regex`` or ``enum``",
"'substrate', 'acid', 'base', 'activating-agent' ] ) #: Prop limit for",
"BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern to match all accepted",
"'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT =",
"property is given and prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit",
"r'(rpm|RPM)?' #: Pattern to match all accepted length units, or",
"= '', enum: Optional[List[str]] = [], ): if not regex",
"r'[ ]?'\\ + units_pattern + r'$)|(^' + quantity_pattern + r'))$',",
"``product_phase`` prop. One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit =",
"g' ) #: Prop limit for mol props. MOL_PROP_LIMIT: PropLimit",
"): if not regex and not enum: raise ValueError( 'Either",
"limit to tell the user what correct input should look",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by standard time units,",
"str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between 0 and",
"self.regex = regex if not hint: self.hint = self.generate_enum_hint() else:",
"VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\\",
"K\".', default='25°C', ) #: Prop limit for time props. TIME_PROP_LIMIT:",
"Pattern matching float between 0 and 100. Used for percentages.",
"``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' + BOOL_PATTERN + r'|solvent)',",
"followed by standard time units, e.g. \"15 mins\", \"3 hrs\".',",
"r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float between 0 and 9.999.",
"r'(' for item in self.enum: regex += item + r'|'",
"\"false\" or \"true\".', default='false', ) #: Prop limit for ``WashSolid``",
"matching float between 0 and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT:",
"e.g. \"25\", \"25°C\", \"298 K\".', default='25°C', ) #: Prop limit",
"' s = s[:-2] + f' or \"{self.enum[-1]}\".' return s",
"= PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] ) #: Pattern",
") -> PropLimit: \"\"\" Convenience function to generate PropLimit object",
"+ _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$', hint='Expecting number",
"match all accepted pressure units, or empty string. PRESSURE_UNITS_PATTERN =",
"PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all accepted rotation",
"] ) #: Prop limit for ``HeatChill`` ``purpose`` prop. One",
"value, e.g. \"3\"', default='1', ) #: Prop limit for any",
"e.g. '100', '100.0', '100.000' would #: all be matched. _hundred_float:",
"= r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all accepted mass units,",
"r'(°C|K|F)?' #: Pattern to match all accepted time units, or",
"volume, '0 mL'. enum (List[str]): List of values that the",
"all be matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern matching",
"= r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between 0 and 100.",
"= r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float between 0 and",
"f' or \"{self.enum[-1]}\".' return s ################## # Regex patterns #",
") #: Prop limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit =",
"requiring a boolean value. Used if no explicit prop #:",
"case standard units are used. hint (str): Hint for the",
"for ``regex`` and ``hint``. ``hint`` and ``default`` are both optional,",
"RPM value, e.g. \"400 RPM\".', default='400 RPM', ) #: Prop",
"listing all items in :py:attr:`enum`. \"\"\" s = 'Expecting one",
"e.g. \"400 nm\".', default='400 nm' ) #: Prop limit for",
"``purpose`` prop. One of 'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify'",
"and not match with invalid values. hint (str): Useful hint",
"``component_type`` prop. One of 'reactor', #: 'filter', 'separator', 'rotavap' or",
"'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor',",
"= PropLimit( r'[0-9]+', hint='Expecting positive integer value, e.g. \"3\"', default='1',",
"matched as not including units is allowed as in this",
"_zero_to_ten_float + ')$', hint='Expecting number from 0-100 representing a percentage,",
"#: Pattern to match all accepted length units, or empty",
"Args: value (str): Value to validate against prop limit. Returns:",
"default # If enum given generate regex from this self.enum",
"STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #: Prop limit for ``Reagent``",
"Optional[str] = None, hint: Optional[str] = '', default: Optional[str] =",
"elements. For example, a volume property should be a positive",
"is important here that defaults use the standard unit for",
"else: self.regex = regex self.hint = hint def validate(self, value:",
"given prop. For example, checking appropriate units are used or",
"all accepted pressure units, or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?'",
"for any props requiring a positive integer such as ``repeats``.",
"if no explicit property is given and prop type is",
"enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop limit for ``Stir`` ``purpose``",
"``HeatChill`` ``purpose`` prop. One of 'control-exotherm', #: 'reaction' or 'unstable-reagent'.",
"hint=hint, default=default ) # NOTE: It is important here that",
"hint for what valid value should look like, e.g. \"Volume",
"a special case as the #: value can be ``True``,",
"e.g. '0', 1', '10.3', '0.0' would all be matched by",
"+ r'[ ]?'\\ + VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN +",
"\"298 K\".', default='25°C', ) #: Prop limit for time props.",
"NOTE: It is important here that defaults use the standard",
"default (str): Default valid value. Should use standard units of",
"value 100, e.g. '100', '100.0', '100.000' would #: all be",
"a certain range. Either ``regex`` or ``enum`` must be given",
"or mmol, e.g. \"2.3 mol\".', default='0 mol', ) #: Prop",
"case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern to match",
"#: Pattern to match all accepted pressure units, or empty",
"use standard units of the quantity involved, e.g. for volume,",
"mass units, or empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #:",
"number followed by standard volume units, e.g. \"5.5 mL\"', default='0",
"prop limit is used to check that input supplied is",
"is given it will override whatever is given for ``regex``",
"given and prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit(",
"limits # ############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint:",
"Hint will list all items in :py:attr:`enum`. Returns: str: Hint",
"is matched as not including units is allowed as in",
"Pattern to match a positive float, #: e.g. '0', 1',",
"automatically generate a regex from the list of allowed values.",
"= [], ): if not regex and not enum: raise",
"optional, but recommended, at least when using ``regex`` not ``enum``.",
"items in :py:attr:`enum`. \"\"\" s = 'Expecting one of '",
"str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all accepted mass",
"prop. For example, checking appropriate units are used or a",
"# Otherwise just set regex as attribute else: self.regex =",
"e.g. '5 mL'.\" default (str): Default valid value. Should use",
"r')' return regex def generate_enum_hint(self) -> str: \"\"\"Generate hint from",
"1', '10.3', '0.0' would all be matched by this pattern,",
"limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN,",
"'solvent', 'false'], hint='Expecting one of \"true\", \"false\" or \"solvent\".', default='True'",
"or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match",
"enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ] )",
"r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all accepted pressure units, or",
"for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate', #: 'dissolve',",
"default='1', ) #: Prop limit for any props requiring a",
"str = r'(°C|K|F)?' #: Pattern to match all accepted time",
"not None def generate_enum_regex(self) -> str: \"\"\"Generate regex from :py:attr:`enum`.",
"bool: \"\"\"Validate given value against prop limit regex. Args: value",
"here that defaults use the standard unit for that #",
"'Expecting one of ' for item in self.enum[:-1]: s +=",
"e.g. \"3\"', default='1', ) #: Prop limit for any props",
"that will match any of the strings in the :py:attr:`enum`",
"-> str: \"\"\"Generate regex from :py:attr:`enum`. Regex will match any",
"TIME_UNITS_PATTERN, hint='Expecting number followed by standard time units, e.g. \"15",
"#: Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting",
"] ) #: Prop limit for ``Component`` ``component_type`` prop. One",
"= r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all accepted rotation speed",
"different quantity types, i.e. for variations on the number followed",
"'filter', 'separator', 'rotavap', 'flask'] ) #: Pattern matching a float",
"props requiring a boolean value. Used if no explicit prop",
"\"\"\" def __init__( self, regex: Optional[str] = None, hint: Optional[str]",
"to match all accepted mol units, or empty string. MOL_UNITS_PATTERN",
"str, units_pattern: str, hint: Optional[str] = '', default: Optional[str] =",
"enum: Optional[List[str]] = [], ): if not regex and not",
"units, e.g. \"25\", \"25°C\", \"298 K\".', default='25°C', ) #: Prop",
"PropLimit( BOOL_PATTERN, hint='Expecting one of \"false\" or \"true\".', default='false', )",
"class for storing prop limit. A prop limit is essentially",
"limit for ``WashSolid`` ``stir`` prop. This is a special case",
"+ '|' + _zero_to_ten_float + ')$', hint='Expecting number from 0-100",
"is given and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit =",
"number followed by mol or mmol, e.g. \"2.3 mol\".', default='0",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by standard mass units,",
"a volume property should be a positive number, optionally followed",
"essentially a regex for validating the input to a given",
"for mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting",
"Returns: str: Hint listing all items in :py:attr:`enum`. \"\"\" s",
"mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number",
"\"\"\" import re from typing import List, Optional class PropLimit(object):",
"any props requiring a positive float. Used if no explicit",
"# quantity type as XDL app uses this to add",
"#: Pattern to match a positive or negative float, #:",
"or \"true\".', default='false', ) #: Prop limit for ``WashSolid`` ``stir``",
") #: Prop limit for ``Separate`` ``purpose`` prop. One of",
"for temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting",
"and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float",
"regex += item + r'|' regex = regex[:-1] + r')'",
") #: Prop limit for ``Component`` ``component_type`` prop. One of",
"default='0 secs' ) #: Prop limit for pressure props. PRESSURE_PROP_LIMIT:",
"from :py:attr:`enum`. Regex will match any of the items in",
"\"\"\" Convenience function to generate PropLimit object for different quantity",
"'0.0' would all be matched by this pattern, but not",
"= r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all accepted temperature units,",
"integer value, e.g. \"3\"', default='1', ) #: Prop limit for",
"`enum` argument must be given.') self.default = default # If",
"r'(' + BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one",
"\"3.5\"', default='0', ) #: Prop limit for any props requiring",
"units, e.g. \"50 mbar\", \"1 atm\".', default='1013.25 mbar' ) #:",
"by standard pressure units, e.g. \"50 mbar\", \"1 atm\".', default='1013.25",
"hint def validate(self, value: str) -> bool: \"\"\"Validate given value",
"of allowed values. \"\"\" def __init__( self, regex: Optional[str] =",
"Hint for the prop limit to tell the user what",
"units, e.g. '5 mL'.\" default (str): Default valid value. Should",
"limit for ``Component`` ``component_type`` prop. One of 'reactor', #: 'filter',",
"number from 0-100 representing a percentage, e.g. \"50\", \"8.5\".', default='0',",
"it will override whatever is given for ``regex`` and ``hint``.",
"Prop limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"attribute else: self.regex = regex self.hint = hint def validate(self,",
"accepted mol units, or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ###############",
"range. Either ``regex`` or ``enum`` must be given when instantiating.",
"#: Pattern matching any float between 0 and 9.999. _zero_to_ten_float:",
"for the prop involved. \"\"\" return PropLimit( regex=r'^((' + quantity_pattern",
"and prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN,",
"+ quantity_pattern + r'[ ]?'\\ + units_pattern + r'$)|(^' +",
"\"\"\" return re.match(self.regex, value) is not None def generate_enum_regex(self) ->",
"PropLimit: \"\"\" Convenience function to generate PropLimit object for different",
"Prop limit for ``Separate`` ``product_phase`` prop. One of 'top' or",
"time units, e.g. \"15 mins\", \"3 hrs\".', default='0 secs' )",
"For example, a volume property should be a positive number,",
"prop. One of 'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT =",
"r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between 0 and 100. Used",
"against prop limit regex. Args: value (str): Value to validate",
"otherwise False. \"\"\" return re.match(self.regex, value) is not None def",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. \"400 RPM\".', default='400",
"not including units is allowed as in this case standard",
"return PropLimit( regex=r'^((' + quantity_pattern + r'[ ]?'\\ + units_pattern",
"One of 'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify' or 'dilute'.",
"'0', 1', '10.3', '0.0' would all be matched by this",
"Optional[str] = '', default: Optional[str] = '', enum: Optional[List[str]] =",
"#: Pattern matching a float of value 100, e.g. '100',",
"default='0', ) #: Prop limit for any props requiring a",
"when using ``regex`` not ``enum``. Arguments: regex (str): Regex pattern",
"value can be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit =",
"################## #: Pattern to match a positive or negative float,",
"percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' + _hundred_float + '|'\\",
"'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid',",
"``regex`` or ``enum`` must be given when instantiating. If ``enum``",
"'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve',",
"#: Prop limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm',",
"enum if enum: if not regex: self.regex = self.generate_enum_regex() else:",
"typing import List, Optional class PropLimit(object): \"\"\"Convenience class for storing",
"the prop can take. This is used to automatically generate",
"def __init__( self, regex: Optional[str] = None, hint: Optional[str] =",
"item in self.enum[:-1]: s += f'\"{item}\", ' s = s[:-2]",
"wavelength, e.g. \"400 nm\".', default='400 nm' ) #: Prop limit",
"this pattern, but not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str",
"``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate', #: 'dissolve', 'basify',",
"volume units, e.g. '5 mL'.\" default (str): Default valid value.",
"the prop involved. \"\"\" return PropLimit( regex=r'^((' + quantity_pattern +",
":py:attr:`enum`. Regex will match any of the items in :py:attr:`enum`.",
"property should be a positive number, optionally followed by volume",
"PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by",
"'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter', 'separator',",
"to match all accepted length units, or empty string. DISTANCE_UNITS_PATTERN",
"\"3 hrs\".', default='0 secs' ) #: Prop limit for pressure",
"PropLimit = PropLimit( r'(' + BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent',",
"VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all accepted",
"value. Used if no explicit prop #: type is given",
"#: Pattern to match boolean strings, specifically matching 'true' and",
"default='1013.25 mbar' ) #: Prop limit for rotation speed props.",
"POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ + VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN",
"accepted length units, or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #:",
"from the list of allowed values. \"\"\" def __init__( self,",
"and prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+',",
"Prop limit for any props requiring a positive float. Used",
"#: Prop limit for any props requiring a boolean value.",
"'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] )",
"TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees",
"regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ + VOLUME_UNITS_PATTERN + r')|('",
"PropLimit( r'[0-9]+', hint='Expecting positive integer value, e.g. \"3\"', default='1', )",
"'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter',",
"but recommended, at least when using ``regex`` not ``enum``. Arguments:",
"temperature units, e.g. \"25\", \"25°C\", \"298 K\".', default='25°C', ) #:",
"matched by this #: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #:",
"value, e.g. \"400 RPM\".', default='400 RPM', ) #: Prop limit",
"self, regex: Optional[str] = None, hint: Optional[str] = '', default:",
"#: e.g. '0', 1', '10.3', '0.0' would all be matched",
"for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting",
"number followed by standard pressure units, e.g. \"50 mbar\", \"1",
"check that input supplied is valid for that property. \"\"\"",
"'0', '-1', '1', '-10.3', '10.3', '0.0' would all be matched",
"bool: True if the value matches the prop limit, otherwise",
"matching a float of value 100, e.g. '100', '100.0', '100.000'",
"``hint`` and ``default`` are both optional, but recommended, at least",
"\"false\" or \"solvent\".', default='True' ) #: Prop limit for ``Separate``",
"'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ] ) #: Prop limit",
"limit. Returns: bool: True if the value matches the prop",
") #: Prop limit for ``WashSolid`` ``stir`` prop. This is",
"``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit",
"(str): Pattern to match the number expected. This will typically",
"+ quantity_pattern + r'))$', hint=hint, default=default ) # NOTE: It",
"of 'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT",
"``regex`` and ``hint``. ``hint`` and ``default`` are both optional, but",
"'5 mL'.\" default (str): Default valid value. Should use standard",
"PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius",
"of ' for item in self.enum[:-1]: s += f'\"{item}\", '",
"#: Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize',",
"are used to validate the input given to xdl elements.",
"will match any of the strings in the :py:attr:`enum` list.",
"= r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all accepted mol units,",
"all accepted time units, or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?'",
"str = r'(100(?:[.][0]+)?)' #: Pattern matching any float between 10.000",
"string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all accepted",
"the case of an errror. default (str): Default value for",
"a positive number, optionally followed by volume units. The prop",
"str, hint: Optional[str] = '', default: Optional[str] = '' )",
"props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value,",
"type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive",
"standard pressure units, e.g. \"50 mbar\", \"1 atm\".', default='1013.25 mbar'",
"least when using ``regex`` not ``enum``. Arguments: regex (str): Regex",
"and ``hint``. ``hint`` and ``default`` are both optional, but recommended,",
"= hint # Otherwise just set regex as attribute else:",
"Default valid value. Should use standard units of the quantity",
"values. \"\"\" def __init__( self, regex: Optional[str] = None, hint:",
"+ '|'\\ + _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$',",
"or ``enum`` must be given when instantiating. If ``enum`` is",
"and prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN,",
"limit for ``Stir`` ``purpose`` prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT",
"i.e. for variations on the number followed by unit pattern.",
"followed by volume units, e.g. '5 mL'.\" default (str): Default",
"def generate_enum_hint(self) -> str: \"\"\"Generate hint from :py:attr:`enum`. Hint will",
"prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting",
"'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT",
"MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by",
"to add in default units. #: Prop limit for volume",
"for item in self.enum: regex += item + r'|' regex",
"or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop limits",
"take. This is used to automatically generate a regex from",
"user what correct input should look like in the case",
"``stir`` prop. This is a special case as the #:",
"be given.') self.default = default # If enum given generate",
"\"{self.enum[-1]}\".' return s ################## # Regex patterns # ################## #:",
"= default # If enum given generate regex from this",
"for volume, '0 mL'. enum (List[str]): List of values that",
"not enum: raise ValueError( 'Either `regex` or `enum` argument must",
"units, e.g. \"15 mins\", \"3 hrs\".', default='0 secs' ) #:",
"insensitvely, or empty string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern",
"'10.3', '0.0' would all be matched by this #: pattern.",
"+ POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed by standard volume",
"is allowed as in this case standard units are used.",
"quantity type as XDL app uses this to add in",
"\"true\".', default='false', ) #: Prop limit for ``WashSolid`` ``stir`` prop.",
"example, checking appropriate units are used or a value is",
"Pattern to match a positive or negative float, #: e.g.",
"regex: Optional[str] = None, hint: Optional[str] = '', default: Optional[str]",
"Pattern matching a float of value 100, e.g. '100', '100.0',",
"generate_enum_regex(self) -> str: \"\"\"Generate regex from :py:attr:`enum`. Regex will match",
"quantity_pattern + r'[ ]?'\\ + units_pattern + r'$)|(^' + quantity_pattern",
") #: Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit =",
"a regex for validating the input to a given prop.",
"list all items in :py:attr:`enum`. Returns: str: Hint listing all",
"float of value 100, e.g. '100', '100.0', '100.000' would #:",
"limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent', #:",
"hint='Expecting one of \"true\", \"false\" or \"solvent\".', default='True' ) #:",
"hint: self.hint = self.generate_enum_hint() else: self.hint = hint # Otherwise",
"PropLimit object for different quantity types, i.e. for variations on",
"None def generate_enum_regex(self) -> str: \"\"\"Generate regex from :py:attr:`enum`. Regex",
"is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float",
"string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all",
"``role`` prop. One of 'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid',",
"'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[",
"value for the prop limit, should use standard units for",
"for volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN",
"for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent', #: 'catalyst',",
"r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all accepted mol units, or",
"``Stir`` ``purpose`` prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit(",
"limit for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm', #: 'reaction'",
"\"5.5 mL\"', default='0 mL', ) #: Prop limit for mass",
"as the #: value can be ``True``, ``False`` or ``'solvent'``.",
"that the prop can take. This is used to automatically",
"or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match",
"pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a",
"FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a positive",
"e.g. \"15 mins\", \"3 hrs\".', default='0 secs' ) #: Prop",
"pattern, but not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str =",
"errror. default (str): Default value for the prop limit, should",
"be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern to match the",
"be matched by this pattern, but not #: '-10.3' or",
"or empty string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to",
"= regex if not hint: self.hint = self.generate_enum_hint() else: self.hint",
"str) -> bool: \"\"\"Validate given value against prop limit regex.",
"defaults use the standard unit for that # quantity type",
"celsius or number followed by standard temperature units, e.g. \"25\",",
"of value 100, e.g. '100', '100.0', '100.000' would #: all",
"self.default = default # If enum given generate regex from",
"or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match",
"= r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all accepted pressure units,",
"Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' + _hundred_float",
"str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float between 0",
"valid values and not match with invalid values. hint (str):",
"= r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a positive float, #:",
"for ``Stir`` ``purpose`` prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT =",
"this self.enum = enum if enum: if not regex: self.regex",
"followed by standard temperature units, e.g. \"25\", \"25°C\", \"298 K\".',",
"given.') self.default = default # If enum given generate regex",
"prop limit, should use standard units for the prop involved.",
"of an errror. default (str): Default value for the prop",
"by volume units. The prop limit is used to check",
"e.g. \"2.3 g\"', default='0 g' ) #: Prop limit for",
"\"Volume should be a number followed by volume units, e.g.",
"of 'reactor', #: 'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit",
") #: Prop limit for temp props. TEMP_PROP_LIMIT: PropLimit =",
"a boolean value. Used if no explicit prop #: type",
"within a certain range. Either ``regex`` or ``enum`` must be",
"_ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$', hint='Expecting number from",
"\"2.3 mol\".', default='0 mol', ) #: Prop limit for temp",
"not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #:",
"hint='Expecting number from 0-100 representing a percentage, e.g. \"50\", \"8.5\".',",
"POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. \"400 RPM\".', default='400 RPM',",
"#: Pattern to match a positive float, #: e.g. '0',",
"not match with invalid values. hint (str): Useful hint for",
"units, e.g. \"2.3 g\"', default='0 g' ) #: Prop limit",
"generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint: Optional[str] = '', default:",
"are used or a value is within a certain range.",
"\"\"\"Validate given value against prop limit regex. Args: value (str):",
"generate_enum_hint(self) -> str: \"\"\"Generate hint from :py:attr:`enum`. Hint will list",
"#: Prop limit for ``Separate`` ``product_phase`` prop. One of 'top'",
"'', default: Optional[str] = '' ) -> PropLimit: \"\"\" Convenience",
"This is used to automatically generate a regex from the",
"the list of allowed values. \"\"\" def __init__( self, regex:",
"Regex pattern that should match with valid values and not",
"True if the value matches the prop limit, otherwise False.",
"argument must be given.') self.default = default # If enum",
"regex: self.regex = self.generate_enum_regex() else: self.regex = regex if not",
"str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean strings, specifically",
"in the case of an errror. default (str): Default value",
"= r'(false|False|true|True)' #: Pattern to match all accepted volumes units",
"regex self.hint = hint def validate(self, value: str) -> bool:",
"PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g. \"3\", \"3.5\"', default='0',",
"#: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent']",
"explicit #: prop type is given and prop type is",
"self.generate_enum_hint() else: self.hint = hint # Otherwise just set regex",
"wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength,",
"all accepted volumes units case insensitvely, or empty string. VOLUME_UNITS_PATTERN:",
"DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all accepted mol",
"= r'(mmol|mol)?' ############### # Prop limits # ############### def generate_quantity_units_pattern(",
"PropLimit(object): \"\"\"Convenience class for storing prop limit. A prop limit",
"quantity types, i.e. for variations on the number followed by",
"type is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one",
"``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' +",
"to match a positive or negative float, #: e.g. '0',",
"is essentially a regex for validating the input to a",
"Pattern to match all accepted time units, or empty string.",
"explicit property is given and prop type is ``int``. POSITIVE_INT_PROP_LIMIT:",
"the #: value can be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT:",
"to match the number expected. This will typically be ``POSITIVE_FLOAT_PATTERN``",
"should match with valid values and not match with invalid",
"limit regex. Args: value (str): Value to validate against prop",
"default='25°C', ) #: Prop limit for time props. TIME_PROP_LIMIT: PropLimit",
"including units is allowed as in this case standard units",
"for any props requiring a positive float. Used if no",
"'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop limit for",
"any of the items in :py:attr:`enum`. Returns: str: Regex that",
"standard temperature units, e.g. \"25\", \"25°C\", \"298 K\".', default='25°C', )",
"hint='Expecting number followed by standard time units, e.g. \"15 mins\",",
"when instantiating. If ``enum`` is given it will override whatever",
"an errror. default (str): Default value for the prop limit,",
"the input given to xdl elements. For example, a volume",
"REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base',",
"a positive integer such as ``repeats``. #: Used if no",
"of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #:",
"if not regex: self.regex = self.generate_enum_regex() else: self.regex = regex",
"look like, e.g. \"Volume should be a number followed by",
"= '' ) -> PropLimit: \"\"\" Convenience function to generate",
"or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to match",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard",
"or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' + BOOL_PATTERN +",
"_ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float between",
"units, or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to",
"units, or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop",
"TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all accepted pressure",
"for time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting",
"invalid values. hint (str): Useful hint for what valid value",
"PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive integer value, e.g. \"3\"',",
"for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT:",
"accepted temperature units, or empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?'",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400 nm\".', default='400 nm'",
"hint='Expecting number followed by standard volume units, e.g. \"5.5 mL\"',",
"re from typing import List, Optional class PropLimit(object): \"\"\"Convenience class",
"such as ``repeats``. #: Used if no explicit property is",
"hint='Expecting number followed by standard pressure units, e.g. \"50 mbar\",",
"to automatically generate a regex from the list of allowed",
"of values that the prop can take. This is used",
"One of 'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit(",
"allowed values. \"\"\" def __init__( self, regex: Optional[str] = None,",
"all be matched by this pattern, but not #: '-10.3'",
"'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop limit for",
"number expected. This will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern",
"both optional, but recommended, at least when using ``regex`` not",
"MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by",
"integer such as ``repeats``. #: Used if no explicit property",
"'reagent', #: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT =",
"#: all be matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern",
"``purpose`` prop. One of 'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT",
"pressure units, or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern",
"regex = r'(' for item in self.enum: regex += item",
"+ r')' return regex def generate_enum_hint(self) -> str: \"\"\"Generate hint",
"or \"{self.enum[-1]}\".' return s ################## # Regex patterns # ##################",
"#: Used if no explicit property is given and prop",
"app uses this to add in default units. #: Prop",
"_hundred_float + '|'\\ + _ten_to_ninety_nine_float + '|' + _zero_to_ten_float +",
"units are used or a value is within a certain",
"match the units expected or empty string. Empty string is",
"by this #: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern",
"import re from typing import List, Optional class PropLimit(object): \"\"\"Convenience",
"#: Prop limit for time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"BOOL_PATTERN, hint='Expecting one of \"false\" or \"true\".', default='false', ) #:",
"matching any float between 10.000 and 99.999. _ten_to_ninety_nine_float: str =",
"Should use standard units of the quantity involved, e.g. for",
"number followed by unit pattern. Args: quantity_pattern (str): Pattern to",
"'-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to",
"regex[:-1] + r')' return regex def generate_enum_hint(self) -> str: \"\"\"Generate",
"degrees celsius or number followed by standard temperature units, e.g.",
"values that the prop can take. This is used to",
"mL\"', default='0 mL', ) #: Prop limit for mass props.",
"positive float, #: e.g. '0', 1', '10.3', '0.0' would all",
"Prop limit for any props requiring a boolean value. Used",
"only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #: Prop limit",
"Returns: bool: True if the value matches the prop limit,",
"hint (str): Hint for the prop limit to tell the",
"mol', ) #: Prop limit for temp props. TEMP_PROP_LIMIT: PropLimit",
"should look like, e.g. \"Volume should be a number followed",
"on the number followed by unit pattern. Args: quantity_pattern (str):",
"``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value,",
"all items in :py:attr:`enum`. Returns: str: Hint listing all items",
"empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to match all",
"or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate',",
"#: Prop limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"this #: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to",
"Prop limit for temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN,",
"``hint``. ``hint`` and ``default`` are both optional, but recommended, at",
"= self.generate_enum_regex() else: self.regex = regex if not hint: self.hint",
"using ``regex`` not ``enum``. Arguments: regex (str): Regex pattern that",
"match a positive float, #: e.g. '0', 1', '10.3', '0.0'",
"= PropLimit( enum=['dissolve'] ) #: Prop limit for ``Reagent`` ``role``",
"props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed",
"regex=r'^((' + quantity_pattern + r'[ ]?'\\ + units_pattern + r'$)|(^'",
"pressure units, e.g. \"50 mbar\", \"1 atm\".', default='1013.25 mbar' )",
"props requiring a positive float. Used if no explicit #:",
"r'$)|(^' + quantity_pattern + r'))$', hint=hint, default=default ) # NOTE:",
"Prop limits # ############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str,",
"match all accepted mass units, or empty string. MASS_UNITS_PATTERN: str",
"Pattern to match boolean strings, specifically matching 'true' and 'false'",
"units_pattern (str): Pattern to match the units expected or empty",
"str: Hint listing all items in :py:attr:`enum`. \"\"\" s =",
"Used if no explicit prop #: type is given and",
"matches the prop limit, otherwise False. \"\"\" return re.match(self.regex, value)",
"= PropLimit( r'(' + BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'],",
"HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop limit",
"\"\"\" s = 'Expecting one of ' for item in",
"in the :py:attr:`enum` list. \"\"\" regex = r'(' for item",
"#: Prop limit for any props requiring a positive float.",
"volume units. The prop limit is used to check that",
"mL'.\" default (str): Default valid value. Should use standard units",
"'rotavap', 'flask'] ) #: Pattern matching a float of value",
"'Either `regex` or `enum` argument must be given.') self.default =",
"temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number",
"number followed by standard mass units, e.g. \"2.3 g\"', default='0",
"'false'], hint='Expecting one of \"true\", \"false\" or \"solvent\".', default='True' )",
"is valid for that property. \"\"\" import re from typing",
"99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching any float",
"matching 'true' and 'false' #: case insensitvely. BOOL_PATTERN: str =",
"no explicit prop #: type is given and prop type",
"else: self.hint = hint # Otherwise just set regex as",
"r'[0-9]+', hint='Expecting positive integer value, e.g. \"3\"', default='1', ) #:",
"by standard temperature units, e.g. \"25\", \"25°C\", \"298 K\".', default='25°C',",
"`regex` or `enum` argument must be given.') self.default = default",
"Regex patterns # ################## #: Pattern to match a positive",
"+ BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one of",
"Otherwise just set regex as attribute else: self.regex = regex",
"'acidify', 'dilute', ] ) #: Prop limit for ``HeatChill`` ``purpose``",
"None, hint: Optional[str] = '', default: Optional[str] = '', enum:",
"that defaults use the standard unit for that # quantity",
"with invalid values. hint (str): Useful hint for what valid",
"values. hint (str): Useful hint for what valid value should",
"example, a volume property should be a positive number, optionally",
"quantity involved, e.g. for volume, '0 mL'. enum (List[str]): List",
"match all accepted temperature units, or empty string. TEMP_UNITS_PATTERN: str",
"for ``WashSolid`` ``stir`` prop. This is a special case as",
"override whatever is given for ``regex`` and ``hint``. ``hint`` and",
"\"\"\"Generate regex from :py:attr:`enum`. Regex will match any of the",
"+ f' or \"{self.enum[-1]}\".' return s ################## # Regex patterns",
"'0 mL'. enum (List[str]): List of values that the prop",
"for the prop limit to tell the user what correct",
"str: \"\"\"Generate regex from :py:attr:`enum`. Regex will match any of",
"or a value is within a certain range. Either ``regex``",
"function to generate PropLimit object for different quantity types, i.e.",
"#: Pattern matching any float between 10.000 and 99.999. _ten_to_ninety_nine_float:",
"whatever is given for ``regex`` and ``hint``. ``hint`` and ``default``",
"matching any float between 0 and 9.999. _zero_to_ten_float: str =",
"POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400 nm\".', default='400 nm' )",
"self.hint = hint # Otherwise just set regex as attribute",
"value: str) -> bool: \"\"\"Validate given value against prop limit",
"length units, or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern",
"r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed by standard",
"can be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit(",
"# ################## #: Pattern to match a positive or negative",
"'substrate', 'acid', 'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent',",
"if enum: if not regex: self.regex = self.generate_enum_regex() else: self.regex",
"WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' + BOOL_PATTERN + r'|solvent)', enum=['true',",
"units case insensitvely, or empty string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?'",
"9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching float between",
"<reponame>mcrav/xdl<filename>xdl/utils/prop_limits.py \"\"\"Prop limits are used to validate the input given",
"+ _zero_to_ten_float + ')$', hint='Expecting number from 0-100 representing a",
"match any of the items in :py:attr:`enum`. Returns: str: Regex",
"r'(false|False|true|True)' #: Pattern to match all accepted volumes units case",
"units are used. hint (str): Hint for the prop limit",
"or negative float, #: e.g. '0', '-1', '1', '-10.3', '10.3',",
"BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one of \"false\" or",
"enum given generate regex from this self.enum = enum if",
"(str): Regex pattern that should match with valid values and",
"mbar' ) #: Prop limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT:",
"as attribute else: self.regex = regex self.hint = hint def",
"string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to match all accepted",
"#: Prop limit for ``WashSolid`` ``stir`` prop. This is a",
"100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' +",
"by standard mass units, e.g. \"2.3 g\"', default='0 g' )",
"input should look like in the case of an errror.",
"of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #:",
"prop can take. This is used to automatically generate a",
"\"3\"', default='1', ) #: Prop limit for any props requiring",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by standard time",
"= PropLimit(enum=['top', 'bottom']) #: Prop limit for ``Add`` ``purpose`` prop.",
"r'))$', hint='Expecting number followed by standard volume units, e.g. \"5.5",
"props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g.",
"'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ] ) #:",
"pattern. Args: quantity_pattern (str): Pattern to match the number expected.",
"Prop limit for ``WashSolid`` ``stir`` prop. This is a special",
"in :py:attr:`enum`. \"\"\" s = 'Expecting one of ' for",
"+ r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed by",
"(str): Default valid value. Should use standard units of the",
"\"true\", \"false\" or \"solvent\".', default='True' ) #: Prop limit for",
"be matched by this #: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)'",
"VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed",
"empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop limits #",
"return regex def generate_enum_hint(self) -> str: \"\"\"Generate hint from :py:attr:`enum`.",
"= regex[:-1] + r')' return regex def generate_enum_hint(self) -> str:",
"just set regex as attribute else: self.regex = regex self.hint",
"Prop limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate',",
"If enum given generate regex from this self.enum = enum",
"'basify', 'acidify', 'dilute', ] ) #: Prop limit for ``HeatChill``",
") #: Prop limit for any props requiring a boolean",
"would all be matched by this #: pattern. FLOAT_PATTERN: str",
"'|' + _zero_to_ten_float + ')$', hint='Expecting number from 0-100 representing",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard pressure units,",
"important here that defaults use the standard unit for that",
"temperature units, or empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #:",
"is given for ``regex`` and ``hint``. ``hint`` and ``default`` are",
"string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern to match all",
"units, or empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern",
"not regex: self.regex = self.generate_enum_regex() else: self.regex = regex if",
"WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400",
"def generate_enum_regex(self) -> str: \"\"\"Generate regex from :py:attr:`enum`. Regex will",
"0 and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit(",
"')$', hint='Expecting number from 0-100 representing a percentage, e.g. \"50\",",
"property. \"\"\" import re from typing import List, Optional class",
"POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g.",
"#: Pattern to match all accepted time units, or empty",
"def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint: Optional[str] = '',",
"+ POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ + VOLUME_UNITS_PATTERN + r')|(' +",
"of 'control-exotherm', #: 'reaction' or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm',",
"optionally followed by volume units. The prop limit is used",
"limit for any props requiring a positive float. Used if",
"boolean value. Used if no explicit prop #: type is",
"= PropLimit( BOOL_PATTERN, hint='Expecting one of \"false\" or \"true\".', default='false',",
") #: Prop limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit",
"one of \"true\", \"false\" or \"solvent\".', default='True' ) #: Prop",
"used to validate the input given to xdl elements. For",
"prop. One of 'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid', 'base'",
") #: Prop limit for time props. TIME_PROP_LIMIT: PropLimit =",
"types, i.e. for variations on the number followed by unit",
"``purpose`` prop. 'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve']",
"not regex and not enum: raise ValueError( 'Either `regex` or",
":py:attr:`enum` list. \"\"\" regex = r'(' for item in self.enum:",
"specifically matching 'true' and 'false' #: case insensitvely. BOOL_PATTERN: str",
"be matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern matching any",
"look like in the case of an errror. default (str):",
"for storing prop limit. A prop limit is essentially a",
"= PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent'",
"s += f'\"{item}\", ' s = s[:-2] + f' or",
"Pattern to match all accepted temperature units, or empty string.",
"default (str): Default value for the prop limit, should use",
"#: 'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[",
"default: Optional[str] = '', enum: Optional[List[str]] = [], ): if",
"#: Prop limit for any props requiring a positive integer",
") #: Prop limit for any props requiring a positive",
"Prop limit for volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|('",
"]?'\\ + VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting",
"\"\"\"Generate hint from :py:attr:`enum`. Hint will list all items in",
"empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match",
") #: Pattern matching a float of value 100, e.g.",
"'base', 'activating-agent' ] ) #: Prop limit for ``Component`` ``component_type``",
"standard mass units, e.g. \"2.3 g\"', default='0 g' ) #:",
"float, #: e.g. '0', 1', '10.3', '0.0' would all be",
"certain range. Either ``regex`` or ``enum`` must be given when",
"would #: all be matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #:",
"PropLimit( enum=[ 'solvent', 'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ]",
"a positive or negative float, #: e.g. '0', '-1', '1',",
"type as XDL app uses this to add in default",
"'' ) -> PropLimit: \"\"\" Convenience function to generate PropLimit",
"PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one of \"false\" or \"true\".',",
"positive or negative float, #: e.g. '0', '-1', '1', '-10.3',",
"units is allowed as in this case standard units are",
"'reagent', 'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ] ) #: Prop",
"= r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean strings, specifically matching",
"ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g.",
"with valid values and not match with invalid values. hint",
"XDL app uses this to add in default units. #:",
"-> PropLimit: \"\"\" Convenience function to generate PropLimit object for",
"mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number",
"should be a number followed by volume units, e.g. '5",
"will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern to",
"to check that input supplied is valid for that property.",
") #: Prop limit for mass props. MASS_PROP_LIMIT: PropLimit =",
"time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number",
"mol units, or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### #",
"of 'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid', 'base' or 'activating-agent'.",
"is used to check that input supplied is valid for",
"Hint listing all items in :py:attr:`enum`. \"\"\" s = 'Expecting",
"a regex from the list of allowed values. \"\"\" def",
"props requiring a positive integer such as ``repeats``. #: Used",
"'dissolve' is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #:",
"# Prop limits # ############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern:",
"+ units_pattern + r'$)|(^' + quantity_pattern + r'))$', hint=hint, default=default",
"default='0 mol', ) #: Prop limit for temp props. TEMP_PROP_LIMIT:",
"PropLimit(enum=['extract', 'wash']) #: Prop limit for ``Separate`` ``product_phase`` prop. One",
"mol\".', default='0 mol', ) #: Prop limit for temp props.",
"'catalyst', 'substrate', 'acid', 'base', 'activating-agent' ] ) #: Prop limit",
"+ r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one of \"true\", \"false\"",
"involved. \"\"\" return PropLimit( regex=r'^((' + quantity_pattern + r'[ ]?'\\",
"'acid', 'base', 'activating-agent' ] ) #: Prop limit for ``Component``",
"followed by mol or mmol, e.g. \"2.3 mol\".', default='0 mol',",
"#: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern",
"e.g. \"5.5 mL\"', default='0 mL', ) #: Prop limit for",
"'', default: Optional[str] = '', enum: Optional[List[str]] = [], ):",
"variations on the number followed by unit pattern. Args: quantity_pattern",
"prop type is ``float``. POSITIVE_FLOAT_PROP_LIMIT: PropLimit = PropLimit( regex=POSITIVE_FLOAT_PATTERN, hint='Expecting",
"enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] ) #: Pattern matching a",
"in degrees celsius or number followed by standard temperature units,",
"PropLimit( enum=['dissolve'] ) #: Prop limit for ``Reagent`` ``role`` prop.",
"DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400 nm\".', default='400 nm' ) #:",
"_hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern matching any float between",
"ValueError( 'Either `regex` or `enum` argument must be given.') self.default",
"One of 'reactor', #: 'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT:",
"if no explicit #: prop type is given and prop",
"or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap',",
"= PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ + VOLUME_UNITS_PATTERN",
"match all accepted time units, or empty string. TIME_UNITS_PATTERN =",
"object for different quantity types, i.e. for variations on the",
"limit for time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN,",
"to a given prop. For example, checking appropriate units are",
"\"solvent\".', default='True' ) #: Prop limit for ``Separate`` ``purpose`` prop.",
"``WashSolid`` ``stir`` prop. This is a special case as the",
"Pattern to match all accepted rotation speed units, or empty",
"matched by this pattern, but not #: '-10.3' or '-1'.",
"that property. \"\"\" import re from typing import List, Optional",
"empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all",
"followed by standard mass units, e.g. \"2.3 g\"', default='0 g'",
"a float of value 100, e.g. '100', '100.0', '100.000' would",
"this to add in default units. #: Prop limit for",
"validate the input given to xdl elements. For example, a",
"Prop limit for ``Reagent`` ``role`` prop. One of 'solvent', 'reagent',",
"enum (List[str]): List of values that the prop can take.",
"'precipitate', #: 'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit(",
"'dissolve', 'basify', 'acidify' or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize',",
"generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius or number",
"match all accepted mol units, or empty string. MOL_UNITS_PATTERN =",
"any props requiring a boolean value. Used if no explicit",
"input given to xdl elements. For example, a volume property",
"False. \"\"\" return re.match(self.regex, value) is not None def generate_enum_regex(self)",
"value) is not None def generate_enum_regex(self) -> str: \"\"\"Generate regex",
"nm' ) #: Prop limit for any props requiring a",
"in self.enum: regex += item + r'|' regex = regex[:-1]",
"POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive integer value, e.g.",
"Regex will match any of the items in :py:attr:`enum`. Returns:",
"One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash'])",
"r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match all accepted mass units, or",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, DISTANCE_UNITS_PATTERN, hint='Expecting wavelength, e.g. \"400 nm\".', default='400",
"props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in",
"Prop limit for ``Component`` ``component_type`` prop. One of 'reactor', #:",
"all accepted rotation speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN =",
"It is important here that defaults use the standard unit",
"``Component`` ``component_type`` prop. One of 'reactor', #: 'filter', 'separator', 'rotavap'",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by standard mass",
"hrs\".', default='0 secs' ) #: Prop limit for pressure props.",
"PropLimit = PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] ) #:",
") #: Prop limit for ``Reagent`` ``role`` prop. One of",
"default='0 mL', ) #: Prop limit for mass props. MASS_PROP_LIMIT:",
"= '', default: Optional[str] = '', enum: Optional[List[str]] = [],",
"self.enum[:-1]: s += f'\"{item}\", ' s = s[:-2] + f'",
"positive integer value, e.g. \"3\"', default='1', ) #: Prop limit",
"(str): Default value for the prop limit, should use standard",
"limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN,",
"Returns: str: Regex that will match any of the strings",
"by mol or mmol, e.g. \"2.3 mol\".', default='0 mol', )",
"#: Pattern to match all accepted volumes units case insensitvely,",
"is only option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #: Prop",
"\"15 mins\", \"3 hrs\".', default='0 secs' ) #: Prop limit",
"regex def generate_enum_hint(self) -> str: \"\"\"Generate hint from :py:attr:`enum`. Hint",
"'1', '-10.3', '10.3', '0.0' would all be matched by this",
"prop #: type is given and prop type is ``bool``.",
"############### # Prop limits # ############### def generate_quantity_units_pattern( quantity_pattern: str,",
"the user what correct input should look like in the",
"given generate regex from this self.enum = enum if enum:",
"used to automatically generate a regex from the list of",
"'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask']",
"s = s[:-2] + f' or \"{self.enum[-1]}\".' return s ##################",
"validate against prop limit. Returns: bool: True if the value",
"BOOL_PATTERN + r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one of \"true\",",
"############### def generate_quantity_units_pattern( quantity_pattern: str, units_pattern: str, hint: Optional[str] =",
"units_pattern: str, hint: Optional[str] = '', default: Optional[str] = ''",
"Optional[List[str]] = [], ): if not regex and not enum:",
"limit, otherwise False. \"\"\" return re.match(self.regex, value) is not None",
"Prop limit for time props. TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"regex=POSITIVE_FLOAT_PATTERN, hint='Expecting positive float value, e.g. \"3\", \"3.5\"', default='0', )",
"by volume units, e.g. '5 mL'.\" default (str): Default valid",
"input to a given prop. For example, checking appropriate units",
"Pattern to match the units expected or empty string. Empty",
"volume property should be a positive number, optionally followed by",
"unit pattern. Args: quantity_pattern (str): Pattern to match the number",
"#: Prop limit for ``Reagent`` ``role`` prop. One of 'solvent',",
"accepted rotation speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?'",
"return re.match(self.regex, value) is not None def generate_enum_regex(self) -> str:",
"'-10.3', '10.3', '0.0' would all be matched by this #:",
"but not #: '-10.3' or '-1'. POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)'",
"validate(self, value: str) -> bool: \"\"\"Validate given value against prop",
":py:attr:`enum`. Returns: str: Hint listing all items in :py:attr:`enum`. \"\"\"",
"regex and not enum: raise ValueError( 'Either `regex` or `enum`",
"and ``default`` are both optional, but recommended, at least when",
"# If enum given generate regex from this self.enum =",
"used or a value is within a certain range. Either",
"match any of the strings in the :py:attr:`enum` list. \"\"\"",
"to match the units expected or empty string. Empty string",
"= r'(°C|K|F)?' #: Pattern to match all accepted time units,",
"the input to a given prop. For example, checking appropriate",
"empty string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #: Pattern to match",
":py:attr:`enum`. Returns: str: Regex that will match any of the",
"accepted mass units, or empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?'",
"'base' or 'activating-agent'. REAGENT_ROLE_PROP_LIMIT = PropLimit( enum=[ 'solvent', 'reagent', 'catalyst',",
"``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern to match the units",
"or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop limit",
"what correct input should look like in the case of",
"#: Prop limit for volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit(",
"that # quantity type as XDL app uses this to",
"]?'\\ + units_pattern + r'$)|(^' + quantity_pattern + r'))$', hint=hint,",
"mL', ) #: Prop limit for mass props. MASS_PROP_LIMIT: PropLimit",
"TIME_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by",
"\"400 nm\".', default='400 nm' ) #: Prop limit for any",
"typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern to match",
"'unstable-reagent'] ) #: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve'",
"negative float, #: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0'",
"# NOTE: It is important here that defaults use the",
"uses this to add in default units. #: Prop limit",
"or `enum` argument must be given.') self.default = default #",
"default='400 RPM', ) #: Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT:",
"#: Pattern to match all accepted temperature units, or empty",
"default=default ) # NOTE: It is important here that defaults",
"instantiating. If ``enum`` is given it will override whatever is",
"set regex as attribute else: self.regex = regex self.hint =",
"is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive integer",
"default: Optional[str] = '' ) -> PropLimit: \"\"\" Convenience function",
"to generate PropLimit object for different quantity types, i.e. for",
"r'[ ]?'\\ + VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$',",
"enum: if not regex: self.regex = self.generate_enum_regex() else: self.regex =",
"Regex that will match any of the strings in the",
"POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number followed by standard volume units,",
"standard units of the quantity involved, e.g. for volume, '0",
"Used if no explicit property is given and prop type",
"values and not match with invalid values. hint (str): Useful",
"limit. A prop limit is essentially a regex for validating",
"the quantity involved, e.g. for volume, '0 mL'. enum (List[str]):",
"be given when instantiating. If ``enum`` is given it will",
"= PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ]",
"at least when using ``regex`` not ``enum``. Arguments: regex (str):",
"PropLimit(enum=['top', 'bottom']) #: Prop limit for ``Add`` ``purpose`` prop. One",
"limit for any props requiring a boolean value. Used if",
"hint: Optional[str] = '', default: Optional[str] = '', enum: Optional[List[str]]",
"+ r'[ ]?'\\ + units_pattern + r'$)|(^' + quantity_pattern +",
"``Separate`` ``purpose`` prop. One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit",
"match boolean strings, specifically matching 'true' and 'false' #: case",
"option. STIR_PURPOSE_PROP_LIMIT = PropLimit( enum=['dissolve'] ) #: Prop limit for",
"Optional[str] = '', enum: Optional[List[str]] = [], ): if not",
"given value against prop limit regex. Args: value (str): Value",
"float between 0 and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit",
"``default`` are both optional, but recommended, at least when using",
"speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM",
"for pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting",
"Pattern to match the number expected. This will typically be",
"is given and prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit =",
"must be given.') self.default = default # If enum given",
"to xdl elements. For example, a volume property should be",
"to validate against prop limit. Returns: bool: True if the",
"default='400 nm' ) #: Prop limit for any props requiring",
"Optional class PropLimit(object): \"\"\"Convenience class for storing prop limit. A",
"``regex`` not ``enum``. Arguments: regex (str): Regex pattern that should",
"given for ``regex`` and ``hint``. ``hint`` and ``default`` are both",
"should be a positive number, optionally followed by volume units.",
"RPM\".', default='400 RPM', ) #: Prop limit for wavelength props.",
"hint: Optional[str] = '', default: Optional[str] = '' ) ->",
"r'|' regex = regex[:-1] + r')' return regex def generate_enum_hint(self)",
"-> bool: \"\"\"Validate given value against prop limit regex. Args:",
"10.000 and 99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern matching",
"from typing import List, Optional class PropLimit(object): \"\"\"Convenience class for",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN, hint='Expecting RPM value, e.g. \"400 RPM\".',",
"to match a positive float, #: e.g. '0', 1', '10.3',",
"all accepted mol units, or empty string. MOL_UNITS_PATTERN = r'(mmol|mol)?'",
"limits are used to validate the input given to xdl",
"Optional[str] = '' ) -> PropLimit: \"\"\" Convenience function to",
"like in the case of an errror. default (str): Default",
"e.g. \"400 RPM\".', default='400 RPM', ) #: Prop limit for",
"#: type is given and prop type is ``bool``. BOOL_PROP_LIMIT:",
"prop limit is essentially a regex for validating the input",
"r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a positive float, #: e.g.",
"PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] ) #: Pattern matching",
"requiring a positive integer such as ``repeats``. #: Used if",
"number in degrees celsius or number followed by standard temperature",
"like, e.g. \"Volume should be a number followed by volume",
"hint='Expecting positive float value, e.g. \"3\", \"3.5\"', default='0', ) #:",
"\"\"\" regex = r'(' for item in self.enum: regex +=",
"str: \"\"\"Generate hint from :py:attr:`enum`. Hint will list all items",
"by standard volume units, e.g. \"5.5 mL\"', default='0 mL', )",
"strings, specifically matching 'true' and 'false' #: case insensitvely. BOOL_PATTERN:",
"self.generate_enum_regex() else: self.regex = regex if not hint: self.hint =",
"for what valid value should look like, e.g. \"Volume should",
"r'))$', hint=hint, default=default ) # NOTE: It is important here",
"prop limit. A prop limit is essentially a regex for",
"s[:-2] + f' or \"{self.enum[-1]}\".' return s ################## # Regex",
"for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT:",
"validating the input to a given prop. For example, checking",
"for ``HeatChill`` ``purpose`` prop. One of 'control-exotherm', #: 'reaction' or",
"special case as the #: value can be ``True``, ``False``",
"not hint: self.hint = self.generate_enum_hint() else: self.hint = hint #",
"0 and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern matching",
"regex (str): Regex pattern that should match with valid values",
"for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' + _hundred_float +",
"'100', '100.0', '100.000' would #: all be matched. _hundred_float: str",
"the number expected. This will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``.",
"float, #: e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would",
"and 'false' #: case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #:",
"the number followed by unit pattern. Args: quantity_pattern (str): Pattern",
"\"\"\"Convenience class for storing prop limit. A prop limit is",
"#: Prop limit for mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern(",
"one of \"false\" or \"true\".', default='false', ) #: Prop limit",
"expected. This will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str):",
"and 100. Used for percentages. PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^('",
"'false' #: case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern",
"this case standard units are used. hint (str): Hint for",
"class PropLimit(object): \"\"\"Convenience class for storing prop limit. A prop",
"should use standard units for the prop involved. \"\"\" return",
"units. The prop limit is used to check that input",
"all items in :py:attr:`enum`. \"\"\" s = 'Expecting one of",
"is a special case as the #: value can be",
"positive number, optionally followed by volume units. The prop limit",
"PropLimit = PropLimit( r'^(' + _hundred_float + '|'\\ + _ten_to_ninety_nine_float",
"and not enum: raise ValueError( 'Either `regex` or `enum` argument",
"str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all accepted temperature",
"value. Should use standard units of the quantity involved, e.g.",
"#: case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern to",
"#: pattern. FLOAT_PATTERN: str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match",
"r'|solvent)', enum=['true', 'solvent', 'false'], hint='Expecting one of \"true\", \"false\" or",
"enum=['true', 'solvent', 'false'], hint='Expecting one of \"true\", \"false\" or \"solvent\".',",
"regex. Args: value (str): Value to validate against prop limit.",
"for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, ROTATION_SPEED_UNITS_PATTERN,",
"SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop limit for ``Separate``",
"must be given when instantiating. If ``enum`` is given it",
"'true' and 'false' #: case insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)'",
"is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one of",
"= generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard pressure",
"Pattern to match all accepted length units, or empty string.",
"prop. One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract',",
"#: Prop limit for ``Component`` ``component_type`` prop. One of 'reactor',",
"generate regex from this self.enum = enum if enum: if",
"as in this case standard units are used. hint (str):",
"__init__( self, regex: Optional[str] = None, hint: Optional[str] = '',",
"quantity_pattern: str, units_pattern: str, hint: Optional[str] = '', default: Optional[str]",
"default='True' ) #: Prop limit for ``Separate`` ``purpose`` prop. One",
"the prop limit, should use standard units for the prop",
"FLOAT_PATTERN, TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius or number followed",
"given it will override whatever is given for ``regex`` and",
"limit for ``Separate`` ``purpose`` prop. One of 'extract' or 'wash'.",
"'|'\\ + _ten_to_ninety_nine_float + '|' + _zero_to_ten_float + ')$', hint='Expecting",
"prop involved. \"\"\" return PropLimit( regex=r'^((' + quantity_pattern + r'[",
"be a number followed by volume units, e.g. '5 mL'.\"",
"string. MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop limits # ###############",
"This will typically be ``POSITIVE_FLOAT_PATTERN`` or ``FLOAT_PATTERN``. units_pattern (str): Pattern",
"in self.enum[:-1]: s += f'\"{item}\", ' s = s[:-2] +",
"volumes units case insensitvely, or empty string. VOLUME_UNITS_PATTERN: str =",
"'100.0', '100.000' would #: all be matched. _hundred_float: str =",
"to match all accepted time units, or empty string. TIME_UNITS_PATTERN",
"COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit( enum=['reactor', 'filter', 'separator', 'rotavap', 'flask'] )",
"or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop limit",
"\"50 mbar\", \"1 atm\".', default='1013.25 mbar' ) #: Prop limit",
"Prop limit for wavelength props. WAVELENGTH_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"quantity_pattern (str): Pattern to match the number expected. This will",
"#: Pattern to match all accepted mol units, or empty",
"hint='Expecting positive integer value, e.g. \"3\"', default='1', ) #: Prop",
"hint='Expecting number followed by standard mass units, e.g. \"2.3 g\"',",
":py:attr:`enum`. Hint will list all items in :py:attr:`enum`. Returns: str:",
"given to xdl elements. For example, a volume property should",
"units, or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to",
"hint from :py:attr:`enum`. Hint will list all items in :py:attr:`enum`.",
"props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, PRESSURE_UNITS_PATTERN, hint='Expecting number followed",
"regex as attribute else: self.regex = regex self.hint = hint",
"Args: quantity_pattern (str): Pattern to match the number expected. This",
"involved, e.g. for volume, '0 mL'. enum (List[str]): List of",
"value should look like, e.g. \"Volume should be a number",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by standard",
"e.g. \"3\", \"3.5\"', default='0', ) #: Prop limit for any",
"= hint def validate(self, value: str) -> bool: \"\"\"Validate given",
"float between 10.000 and 99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #:",
"limit is essentially a regex for validating the input to",
"\"2.3 g\"', default='0 g' ) #: Prop limit for mol",
"'reactor', #: 'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit =",
"'', enum: Optional[List[str]] = [], ): if not regex and",
"a given prop. For example, checking appropriate units are used",
"Pattern to match all accepted volumes units case insensitvely, or",
"props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed",
"standard time units, e.g. \"15 mins\", \"3 hrs\".', default='0 secs'",
"no explicit #: prop type is given and prop type",
"+ ')$', hint='Expecting number from 0-100 representing a percentage, e.g.",
"units, or empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern",
"followed by unit pattern. Args: quantity_pattern (str): Pattern to match",
"POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by mol or mmol, e.g.",
"Default value for the prop limit, should use standard units",
"= regex self.hint = hint def validate(self, value: str) ->",
"as not including units is allowed as in this case",
"+ _hundred_float + '|'\\ + _ten_to_ninety_nine_float + '|' + _zero_to_ten_float",
"``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit = PropLimit( r'(' + BOOL_PATTERN",
"limit, should use standard units for the prop involved. \"\"\"",
"as ``repeats``. #: Used if no explicit property is given",
"\"3\", \"3.5\"', default='0', ) #: Prop limit for any props",
"value against prop limit regex. Args: value (str): Value to",
"volume props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN +",
"(str): Hint for the prop limit to tell the user",
"Pattern matching any float between 10.000 and 99.999. _ten_to_ninety_nine_float: str",
"Optional[str] = '', default: Optional[str] = '' ) -> PropLimit:",
"positive integer such as ``repeats``. #: Used if no explicit",
"of \"false\" or \"true\".', default='false', ) #: Prop limit for",
"+= f'\"{item}\", ' s = s[:-2] + f' or \"{self.enum[-1]}\".'",
"of \"true\", \"false\" or \"solvent\".', default='True' ) #: Prop limit",
"secs' ) #: Prop limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit",
"would all be matched by this pattern, but not #:",
"default units. #: Prop limit for volume props. VOLUME_PROP_LIMIT: PropLimit",
"limit for ``Separate`` ``product_phase`` prop. One of 'top' or 'bottom'.",
"regex if not hint: self.hint = self.generate_enum_hint() else: self.hint =",
"``purpose`` prop. One of 'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit =",
"'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute', ] ) #: Prop",
"should look like in the case of an errror. default",
"Pattern to match all accepted mol units, or empty string.",
"\"25°C\", \"298 K\".', default='25°C', ) #: Prop limit for time",
"units, or empty string. ROTATION_SPEED_UNITS_PATTERN = r'(rpm|RPM)?' #: Pattern to",
"the prop limit, otherwise False. \"\"\" return re.match(self.regex, value) is",
"self.regex = regex self.hint = hint def validate(self, value: str)",
"limit for mol props. MOL_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN,",
"r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #: Pattern to match all accepted rotation speed units,",
"generate a regex from the list of allowed values. \"\"\"",
"patterns # ################## #: Pattern to match a positive or",
"positive float value, e.g. \"3\", \"3.5\"', default='0', ) #: Prop",
"used to check that input supplied is valid for that",
"units, e.g. \"5.5 mL\"', default='0 mL', ) #: Prop limit",
"``Reagent`` ``role`` prop. One of 'solvent', 'reagent', #: 'catalyst', 'substrate',",
"str = r'([-]?[0-9]+(?:[.][0-9]+)?)' #: Pattern to match a positive float,",
"nm\".', default='400 nm' ) #: Prop limit for any props",
"prop limit regex. Args: value (str): Value to validate against",
"PERCENT_RANGE_PROP_LIMIT: PropLimit = PropLimit( r'^(' + _hundred_float + '|'\\ +",
"be a positive number, optionally followed by volume units. The",
"insensitvely. BOOL_PATTERN: str = r'(false|False|true|True)' #: Pattern to match all",
"match a positive or negative float, #: e.g. '0', '-1',",
"units_pattern + r'$)|(^' + quantity_pattern + r'))$', hint=hint, default=default )",
"or 'dilute'. ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify',",
"'dilute', ] ) #: Prop limit for ``HeatChill`` ``purpose`` prop.",
"limit for ``Add`` ``purpose`` prop. One of 'neutralize', 'precipitate', #:",
"r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all accepted temperature units, or",
"For example, checking appropriate units are used or a value",
"followed by standard volume units, e.g. \"5.5 mL\"', default='0 mL',",
"return s ################## # Regex patterns # ################## #: Pattern",
"accepted pressure units, or empty string. PRESSURE_UNITS_PATTERN = r'(mbar|bar|torr|Torr|mmhg|mmHg|atm|Pa|pa)?' #:",
"#: value can be ``True``, ``False`` or ``'solvent'``. WASH_SOLID_STIR_PROP_LIMIT: PropLimit",
"################## # Regex patterns # ################## #: Pattern to match",
"items in :py:attr:`enum`. Returns: str: Hint listing all items in",
"if not hint: self.hint = self.generate_enum_hint() else: self.hint = hint",
"'dissolve', 'basify', 'acidify', 'dilute', ] ) #: Prop limit for",
"valid value. Should use standard units of the quantity involved,",
"'extract' or 'wash'. SEPARATION_PURPOSE_PROP_LIMIT: PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop",
"allowed as in this case standard units are used. hint",
"will match any of the items in :py:attr:`enum`. Returns: str:",
"MOL_UNITS_PATTERN = r'(mmol|mol)?' ############### # Prop limits # ############### def",
"prop. One of 'neutralize', 'precipitate', #: 'dissolve', 'basify', 'acidify' or",
"time units, or empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern",
"'flask'] ) #: Pattern matching a float of value 100,",
"for item in self.enum[:-1]: s += f'\"{item}\", ' s =",
"in :py:attr:`enum`. Returns: str: Hint listing all items in :py:attr:`enum`.",
"The prop limit is used to check that input supplied",
"generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, MOL_UNITS_PATTERN, hint='Expecting number followed by mol or mmol,",
"value matches the prop limit, otherwise False. \"\"\" return re.match(self.regex,",
"prop limit, otherwise False. \"\"\" return re.match(self.regex, value) is not",
"tell the user what correct input should look like in",
"One of 'solvent', 'reagent', #: 'catalyst', 'substrate', 'acid', 'base' or",
"r'(100(?:[.][0]+)?)' #: Pattern matching any float between 10.000 and 99.999.",
"``repeats``. #: Used if no explicit property is given and",
"(str): Value to validate against prop limit. Returns: bool: True",
"prop limit to tell the user what correct input should",
"prop limit. Returns: bool: True if the value matches the",
"= enum if enum: if not regex: self.regex = self.generate_enum_regex()",
"use standard units for the prop involved. \"\"\" return PropLimit(",
"PropLimit = PropLimit(enum=['extract', 'wash']) #: Prop limit for ``Separate`` ``product_phase``",
"correct input should look like in the case of an",
"or empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern to",
"against prop limit. Returns: bool: True if the value matches",
"regex from this self.enum = enum if enum: if not",
"quantity_pattern + r'))$', hint=hint, default=default ) # NOTE: It is",
"can take. This is used to automatically generate a regex",
"that input supplied is valid for that property. \"\"\" import",
"e.g. '0', '-1', '1', '-10.3', '10.3', '0.0' would all be",
"case of an errror. default (str): Default value for the",
"hint='Expecting number in degrees celsius or number followed by standard",
"mmol, e.g. \"2.3 mol\".', default='0 mol', ) #: Prop limit",
"POSITIVE_FLOAT_PATTERN: str = r'([0-9]+(?:[.][0-9]+)?)' #: Pattern to match boolean strings,",
"= r'(rpm|RPM)?' #: Pattern to match all accepted length units,",
"from :py:attr:`enum`. Hint will list all items in :py:attr:`enum`. Returns:",
"in :py:attr:`enum`. Returns: str: Regex that will match any of",
"standard units are used. hint (str): Hint for the prop",
"or empty string. Empty string is matched as not including",
"input supplied is valid for that property. \"\"\" import re",
"Convenience function to generate PropLimit object for different quantity types,",
") #: Prop limit for mol props. MOL_PROP_LIMIT: PropLimit =",
") # NOTE: It is important here that defaults use",
"between 10.000 and 99.999. _ten_to_ninety_nine_float: str = r'([0-9][0-9](?:[.][0-9]+)?)' #: Pattern",
"Prop limit for any props requiring a positive integer such",
"+ r'))$', hint=hint, default=default ) # NOTE: It is important",
"generate PropLimit object for different quantity types, i.e. for variations",
"POSITIVE_FLOAT_PATTERN, MASS_UNITS_PATTERN, hint='Expecting number followed by standard mass units, e.g.",
"Pattern matching any float between 0 and 9.999. _zero_to_ten_float: str",
"``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit( r'[0-9]+', hint='Expecting positive integer value,",
"str = r'(false|False|true|True)' #: Pattern to match all accepted volumes",
"atm\".', default='1013.25 mbar' ) #: Prop limit for rotation speed",
"used. hint (str): Hint for the prop limit to tell",
"Prop limit for pressure props. PRESSURE_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"e.g. \"Volume should be a number followed by volume units,",
"case insensitvely, or empty string. VOLUME_UNITS_PATTERN: str = r'(l|L|litre|litres|liter|liters|ml|mL|cm3|cc|milliltre|millilitres|milliliter|milliliters|cl|cL|centiltre|centilitres|centiliter|centiliters|dl|dL|deciltre|decilitres|deciliter|deciliters|ul|uL|μl|μL|microlitre|microlitres|microliter|microliters)?' #:",
"Prop limit for ``Separate`` ``purpose`` prop. One of 'extract' or",
"``FLOAT_PATTERN``. units_pattern (str): Pattern to match the units expected or",
"requiring a positive float. Used if no explicit #: prop",
"between 0 and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)' #: Pattern",
"if no explicit prop #: type is given and prop",
"use the standard unit for that # quantity type as",
"xdl elements. For example, a volume property should be a",
"pattern that should match with valid values and not match",
"any of the strings in the :py:attr:`enum` list. \"\"\" regex",
"mol or mmol, e.g. \"2.3 mol\".', default='0 mol', ) #:",
"a positive float, #: e.g. '0', 1', '10.3', '0.0' would",
"PRESSURE_UNITS_PATTERN, hint='Expecting number followed by standard pressure units, e.g. \"50",
"item + r'|' regex = regex[:-1] + r')' return regex",
"string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to match all",
"self.hint = self.generate_enum_hint() else: self.hint = hint # Otherwise just",
"given when instantiating. If ``enum`` is given it will override",
"ADD_PURPOSE_PROP_LIMIT = PropLimit( enum=[ 'neutralize', 'precipitate', 'dissolve', 'basify', 'acidify', 'dilute',",
"100, e.g. '100', '100.0', '100.000' would #: all be matched.",
"any float between 0 and 9.999. _zero_to_ten_float: str = r'([0-9](?:[.][0-9]+)?)'",
"strings in the :py:attr:`enum` list. \"\"\" regex = r'(' for",
"of the items in :py:attr:`enum`. Returns: str: Regex that will",
"match all accepted volumes units case insensitvely, or empty string.",
"supplied is valid for that property. \"\"\" import re from",
"= None, hint: Optional[str] = '', default: Optional[str] = '',",
"match all accepted rotation speed units, or empty string. ROTATION_SPEED_UNITS_PATTERN",
"no explicit property is given and prop type is ``int``.",
"match with valid values and not match with invalid values.",
"units. #: Prop limit for volume props. VOLUME_PROP_LIMIT: PropLimit =",
"+= item + r'|' regex = regex[:-1] + r')' return",
"followed by volume units. The prop limit is used to",
"Prop limit for mass props. MASS_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"mL'. enum (List[str]): List of values that the prop can",
"as XDL app uses this to add in default units.",
"empty string. Empty string is matched as not including units",
"boolean strings, specifically matching 'true' and 'false' #: case insensitvely.",
"PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ + VOLUME_UNITS_PATTERN +",
"+ VOLUME_UNITS_PATTERN + r')|(' + POSITIVE_FLOAT_PATTERN + r'))$', hint='Expecting number",
"\"1 atm\".', default='1013.25 mbar' ) #: Prop limit for rotation",
"prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting",
"standard unit for that # quantity type as XDL app",
"or 'unstable-reagent'. HEATCHILL_PURPOSE_PROP_LIMIT = PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #:",
"= PropLimit( r'^(' + _hundred_float + '|'\\ + _ten_to_ninety_nine_float +",
"is used to automatically generate a regex from the list",
"``bool``. BOOL_PROP_LIMIT: PropLimit = PropLimit( BOOL_PATTERN, hint='Expecting one of \"false\"",
"'100.000' would #: all be matched. _hundred_float: str = r'(100(?:[.][0]+)?)'",
"the value matches the prop limit, otherwise False. \"\"\" return",
"TEMP_UNITS_PATTERN, hint='Expecting number in degrees celsius or number followed by",
"PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN, TIME_UNITS_PATTERN, hint='Expecting number followed by standard",
"positive float. Used if no explicit #: prop type is",
"limit for temp props. TEMP_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( FLOAT_PATTERN, TEMP_UNITS_PATTERN,",
"PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop limit for ``Stir``",
"enum=['dissolve'] ) #: Prop limit for ``Reagent`` ``role`` prop. One",
"r'^(' + _hundred_float + '|'\\ + _ten_to_ninety_nine_float + '|' +",
"matched. _hundred_float: str = r'(100(?:[.][0]+)?)' #: Pattern matching any float",
"the units expected or empty string. Empty string is matched",
"#: 'filter', 'separator', 'rotavap' or 'flask'. COMPONENT_TYPE_PROP_LIMIT: PropLimit = PropLimit(",
"of the strings in the :py:attr:`enum` list. \"\"\" regex =",
"is given and prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit =",
"a number followed by volume units, e.g. '5 mL'.\" default",
"s = 'Expecting one of ' for item in self.enum[:-1]:",
"'reaction', 'unstable-reagent'] ) #: Prop limit for ``Stir`` ``purpose`` prop.",
"for different quantity types, i.e. for variations on the number",
"s ################## # Regex patterns # ################## #: Pattern to",
"string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to match all accepted",
"\"\"\"Prop limits are used to validate the input given to",
"r'(mmol|mol)?' ############### # Prop limits # ############### def generate_quantity_units_pattern( quantity_pattern:",
"str: Regex that will match any of the strings in",
"storing prop limit. A prop limit is essentially a regex",
"Either ``regex`` or ``enum`` must be given when instantiating. If",
"empty string. TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern to match",
"TEMP_UNITS_PATTERN: str = r'(°C|K|F)?' #: Pattern to match all accepted",
"Used if no explicit #: prop type is given and",
"type is given and prop type is ``bool``. BOOL_PROP_LIMIT: PropLimit",
"standard volume units, e.g. \"5.5 mL\"', default='0 mL', ) #:",
"e.g. \"2.3 mol\".', default='0 mol', ) #: Prop limit for",
"If ``enum`` is given it will override whatever is given",
"prop. This is a special case as the #: value",
"raise ValueError( 'Either `regex` or `enum` argument must be given.')",
"else: self.regex = regex if not hint: self.hint = self.generate_enum_hint()",
"or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?' #: Pattern to match",
"props. VOLUME_PROP_LIMIT: PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[",
"to match boolean strings, specifically matching 'true' and 'false' #:",
"if not regex and not enum: raise ValueError( 'Either `regex`",
"hint # Otherwise just set regex as attribute else: self.regex",
"in default units. #: Prop limit for volume props. VOLUME_PROP_LIMIT:",
"self.enum = enum if enum: if not regex: self.regex =",
") #: Prop limit for ``Stir`` ``purpose`` prop. 'dissolve' is",
"(str): Useful hint for what valid value should look like,",
"PropLimit = PropLimit( regex=r'^(all|(' + POSITIVE_FLOAT_PATTERN + r'[ ]?'\\ +",
"limit for rotation speed props. ROTATION_SPEED_PROP_LIMIT: PropLimit = generate_quantity_units_pattern( POSITIVE_FLOAT_PATTERN,",
"re.match(self.regex, value) is not None def generate_enum_regex(self) -> str: \"\"\"Generate",
"= PropLimit( enum=['control-exotherm', 'reaction', 'unstable-reagent'] ) #: Prop limit for",
"given and prop type is ``int``. POSITIVE_INT_PROP_LIMIT: PropLimit = PropLimit(",
":py:attr:`enum`. \"\"\" s = 'Expecting one of ' for item",
"or ``FLOAT_PATTERN``. units_pattern (str): Pattern to match the units expected",
"empty string. TIME_UNITS_PATTERN = r'(days|day|h|hr|hrs|hour|hours|m|min|mins|minute|minutes|s|sec|secs|second|seconds)?' #: Pattern to match all",
"to match all accepted temperature units, or empty string. TEMP_UNITS_PATTERN:",
"the strings in the :py:attr:`enum` list. \"\"\" regex = r'('",
"'separator', 'rotavap', 'flask'] ) #: Pattern matching a float of",
"all accepted length units, or empty string. DISTANCE_UNITS_PATTERN = r'(nm|µm|mm|cm|m|km)?'",
"'top' or 'bottom'. SEPARATION_PRODUCT_PHASE_PROP_LIMIT: PropLimit = PropLimit(enum=['top', 'bottom']) #: Prop",
"match all accepted length units, or empty string. DISTANCE_UNITS_PATTERN =",
"match with invalid values. hint (str): Useful hint for what",
"for variations on the number followed by unit pattern. Args:",
"to validate the input given to xdl elements. For example,",
"import List, Optional class PropLimit(object): \"\"\"Convenience class for storing prop",
"or empty string. MASS_UNITS_PATTERN: str = r'(g|gram|grams|kg|kilogram|kilograms|mg|milligram|milligrams|ug|μg|microgram|micrograms)?' #: Pattern to",
"of the quantity involved, e.g. for volume, '0 mL'. enum",
"number, optionally followed by volume units. The prop limit is"
] |
[
"**kwargs): ... pass >>> bindcallargs(func, 5) ((5, 3), {}) >>>",
"= 5 # overwrite default value for b >>> func(*args,",
"= tuple(bargs) # Start with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg]",
"Start with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in",
"# due to multiple values being passed for that argument",
"b, c, d ... >>> def override_c(*args, **kwargs): ... sig",
"`bargs` consists of the bound args, varargs, and kwonlyargs from",
"function. `bargs` consists of the bound args, varargs, and kwonlyargs",
"bindcallargs_32 for Python > 3.3. sig = inspect.signature(_fUnCtIoN_) ba =",
">>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there') ((5, 4,",
"functionality of bindcallargs_32 for Python > 3.3. sig = inspect.signature(_fUnCtIoN_)",
">>> def func(a, b=3, *args, **kwargs): ... pass >>> bindcallargs(func,",
"specified function. Any default parameter values are included in the",
"varkw, defaults = getargspec(f) kwonlyargs = [] kwonlydefaults = None",
"4, 3, 2, 1), {'hello': 'there'}) >>> args, kwargs =",
"callargs = getcallargs(func, *args, **kwargs) spec = getfullargspec(func) # Construct",
"*ba.kwargs) ... >>> override_c(0, c=3) (0, 1, 10, 3) Also",
"defaults = getargspec(f) kwonlyargs = [] kwonlydefaults = None annotations",
"'__annotations__', {}) return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)",
"tuple (bargs, bkwargs) suitable for manipulation and passing to the",
"getcallargs try: from inspect import getfullargspec except ImportError: # Python",
"parameter of some function: >>> import inspect >>> def func(a,",
"used in a call to the specified function. Any default",
"kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments and",
"getcallargs(func, *args, **kwargs) spec = getfullargspec(func) # Construct all args",
"bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start with kwonlyargs. bkwargs =",
"\"\"\" Provides usable args and kwargs from inspect.getcallargs. For Python",
"annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments and keyword arguments",
"inspect import getcallargs try: from inspect import getfullargspec except ImportError:",
"b >>> func(*args, **kwargs) \"\"\" # It is necessary to",
"= [callargs[arg] for arg in spec.args] if spec.varargs is not",
"allowed in python2, and we # don't support python 3.0,",
"bargs = tuple(bargs) # Start with kwonlyargs. bkwargs = {kwonlyarg:",
"... pass >>> bindcallargs(func, 5) ((5, 3), {}) >>> bindcallargs(func,",
"in the output. Examples -------- >>> def func(a, b=3, *args,",
"spec.kwonlydefaults is not None: # pragma: no cover bkwargs.update({k: v",
"Provides usable args and kwargs from inspect.getcallargs. For Python 3.3",
"FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')",
"kwonlyargs from getfullargspec. `bkwargs` consists of the bound varkw from",
"kwargs from inspect.getcallargs. For Python 3.3 and above, this module",
"values for param in sig.parameters.values(): if param.name not in ba.arguments:",
"from collections import namedtuple from inspect import getargspec FullArgSpec =",
"executed: if spec.kwonlydefaults is not None: # pragma: no cover",
"{}) return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def",
"Add in varkw. if spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return",
"cover bkwargs.update({k: v for k, v in spec.kwonlydefaults.items() if k",
"... return a, b, c, d ... >>> def override_c(*args,",
">>> func(*args, **kwargs) \"\"\" # It is necessary to choose",
"# pragma: no cover bkwargs.update({k: v for k, v in",
"getfullargspec(func) # Construct all args and varargs and use them",
"of the bound varkw from getfullargspec. Both can be used",
"same name will cause a TypeError # due to multiple",
"tuple(bargs) # Start with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for",
"function. # The reason is that any kwarg by the",
"v for k, v in spec.kwonlydefaults.items() if k not in",
"PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to override a parameter of",
"varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args, varargs,",
"bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match functionality of",
"values being passed for that argument name. func = _fUnCtIoN_",
"if sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32 else: bindcallargs =",
"and we # don't support python 3.0, 3.1, 3.2, this",
"-------- >>> def func(a, b=3, *args, **kwargs): ... pass >>>",
"func(a, b=1, c=2, d=3): ... return a, b, c, d",
"v in spec.kwonlydefaults.items() if k not in bkwargs}) # Add",
"None annotations = getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs, varkw,",
"and above, this module is unnecessary and can be achieved",
"a TypeError # due to multiple values being passed for",
"# Since keyword only arguements aren't allowed in python2, and",
"func = _fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs) spec =",
"be used in a call to the specified function. Any",
"The reason is that any kwarg by the same name",
"... >>> def override_c(*args, **kwargs): ... sig = inspect.signature(override) ...",
"in ba.arguments: ba.arguments[param.name] = param.default return ba.args, ba.kwargs if sys.version_info[0:2]",
"varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds",
"http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import inspect from inspect import getcallargs",
"we # don't support python 3.0, 3.1, 3.2, this should",
"included in the output. Examples -------- >>> def func(a, b=3,",
"'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args,",
"ImportError: # Python 2.X from collections import namedtuple from inspect",
"def getfullargspec(f): args, varargs, varkw, defaults = getargspec(f) kwonlyargs =",
"above, this module is unnecessary and can be achieved using",
"of some function: >>> import inspect >>> def func(a, b=1,",
"in a call to the specified function. Any default parameter",
"python2, and we # don't support python 3.0, 3.1, 3.2,",
"pass >>> bindcallargs(func, 5) ((5, 3), {}) >>> bindcallargs(func, 5,",
"bindcallargs(func, 5, 4, 3, 2, 1, hello='there') ((5, 4, 3,",
"use them in bargs bargs = [callargs[arg] for arg in",
"_fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs) spec = getfullargspec(func) #",
"*args, **kwargs): \"\"\"Binds arguments and keyword arguments to a function",
"passed for that argument name. func = _fUnCtIoN_ callargs =",
"kwonlydefaults for unspecified kwonlyargs only. # Since keyword only arguements",
"arguments to a function or method. Returns a tuple (bargs,",
"in spec.kwonlyargs} # Add in kwonlydefaults for unspecified kwonlyargs only.",
"try: from inspect import getfullargspec except ImportError: # Python 2.X",
"2, 1, hello='there') ((5, 4, 3, 2, 1), {'hello': 'there'})",
"arg in spec.args] if spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs",
"from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to override a parameter",
"**kwargs) ... ba['c'] = 10 ... return func(*ba.args, *ba.kwargs) ...",
"For example, to override a parameter of some function: >>>",
"> 3.3. sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) #",
"in all default values for param in sig.parameters.values(): if param.name",
"a function or method. Returns a tuple (bargs, bkwargs) suitable",
"... return func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3) (0, 1,",
"d=3): ... return a, b, c, d ... >>> def",
"inspect from inspect import getcallargs try: from inspect import getfullargspec",
"only. # Since keyword only arguements aren't allowed in python2,",
"or method. Returns a tuple (bargs, bkwargs) suitable for manipulation",
"= getfullargspec(func) # Construct all args and varargs and use",
"be executed: if spec.kwonlydefaults is not None: # pragma: no",
"suitable for manipulation and passing to the specified function. `bargs`",
"in sig.parameters.values(): if param.name not in ba.arguments: ba.arguments[param.name] = param.default",
"if spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) #",
"((5, 4, 3, 2, 1), {'hello': 'there'}) >>> args, kwargs",
"1, hello='there') ((5, 4, 3, 2, 1), {'hello': 'there'}) >>>",
"varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs):",
"output. Examples -------- >>> def func(a, b=3, *args, **kwargs): ...",
"2, 1), {'hello': 'there'}) >>> args, kwargs = bindcallargs(func, 5)",
"Python > 3.3. sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs)",
"kwonlyargs = [] kwonlydefaults = None annotations = getattr(f, '__annotations__',",
"# Add in all default values for param in sig.parameters.values():",
"usable args and kwargs from inspect.getcallargs. For Python 3.3 and",
"10 ... return func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3) (0,",
"not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs):",
"callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} # Add in kwonlydefaults for",
"getfullargspec. Both can be used in a call to the",
"3.1, 3.2, this should never be executed: if spec.kwonlydefaults is",
"1), {'hello': 'there'}) >>> args, kwargs = bindcallargs(func, 5) >>>",
"namedtuple from inspect import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs,",
"**kwargs): # Should match functionality of bindcallargs_32 for Python >",
"for that argument name. func = _fUnCtIoN_ callargs = getcallargs(func,",
"override_c(0, c=3) (0, 1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\"",
"varkw from getfullargspec. Both can be used in a call",
"http://www.python.org/dev/peps/pep-0362/ For example, to override a parameter of some function:",
"5) >>> kwargs['b'] = 5 # overwrite default value for",
"ba = sig.bind(*args, **kwargs) ... ba['c'] = 10 ... return",
"kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args, varargs, varkw, defaults =",
"sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32 else: bindcallargs = bindcallargs_geq33",
"defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args, varargs, varkw, defaults",
"param.default return ba.args, ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs =",
"# Python 2.X from collections import namedtuple from inspect import",
"annotations') def getfullargspec(f): args, varargs, varkw, defaults = getargspec(f) kwonlyargs",
"and can be achieved using features from PEP 362: http://www.python.org/dev/peps/pep-0362/",
"bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should",
"with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs}",
"default values for param in sig.parameters.values(): if param.name not in",
"if spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def",
"a parameter of some function: >>> import inspect >>> def",
"inspect import getfullargspec except ImportError: # Python 2.X from collections",
"except ImportError: # Python 2.X from collections import namedtuple from",
"bkwargs}) # Add in varkw. if spec.varkw is not None:",
"an unlikely variable name for the function. # The reason",
"(bargs, bkwargs) suitable for manipulation and passing to the specified",
"bindcallargs(func, 5) >>> kwargs['b'] = 5 # overwrite default value",
"param in sig.parameters.values(): if param.name not in ba.arguments: ba.arguments[param.name] =",
"namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f):",
"ba.arguments[param.name] = param.default return ba.args, ba.kwargs if sys.version_info[0:2] < (3,3):",
"if param.name not in ba.arguments: ba.arguments[param.name] = param.default return ba.args,",
"= getcallargs(func, *args, **kwargs) spec = getfullargspec(func) # Construct all",
"is necessary to choose an unlikely variable name for the",
"b=1, c=2, d=3): ... return a, b, c, d ...",
"a tuple (bargs, bkwargs) suitable for manipulation and passing to",
"3), {}) >>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there')",
"for kwonlyarg in spec.kwonlyargs} # Add in kwonlydefaults for unspecified",
"\"\"\"Binds arguments and keyword arguments to a function or method.",
"5 # overwrite default value for b >>> func(*args, **kwargs)",
"... ba = sig.bind(*args, **kwargs) ... ba['c'] = 10 ...",
"not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start with kwonlyargs.",
"consists of the bound args, varargs, and kwonlyargs from getfullargspec.",
"the bound args, varargs, and kwonlyargs from getfullargspec. `bkwargs` consists",
"this should never be executed: if spec.kwonlydefaults is not None:",
"= [] kwonlydefaults = None annotations = getattr(f, '__annotations__', {})",
"the output. Examples -------- >>> def func(a, b=3, *args, **kwargs):",
"keyword arguments to a function or method. Returns a tuple",
"= namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def",
"[] kwonlydefaults = None annotations = getattr(f, '__annotations__', {}) return",
"def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments and keyword arguments to",
"is not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args,",
"varargs, and kwonlyargs from getfullargspec. `bkwargs` consists of the bound",
"for manipulation and passing to the specified function. `bargs` consists",
"for the function. # The reason is that any kwarg",
"**kwargs): \"\"\"Binds arguments and keyword arguments to a function or",
"for param in sig.parameters.values(): if param.name not in ba.arguments: ba.arguments[param.name]",
"and passing to the specified function. `bargs` consists of the",
"ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32 else: bindcallargs",
"= sig.bind(*args, **kwargs) ... ba['c'] = 10 ... return func(*ba.args,",
"For Python 3.3 and above, this module is unnecessary and",
"of the bound args, varargs, and kwonlyargs from getfullargspec. `bkwargs`",
"some function: >>> import inspect >>> def func(a, b=1, c=2,",
"# Should match functionality of bindcallargs_32 for Python > 3.3.",
"*args, **kwargs): ... pass >>> bindcallargs(func, 5) ((5, 3), {})",
"spec.kwonlydefaults.items() if k not in bkwargs}) # Add in varkw.",
"*args, **kwargs): # Should match functionality of bindcallargs_32 for Python",
"from getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec.",
"in spec.kwonlydefaults.items() if k not in bkwargs}) # Add in",
"cause a TypeError # due to multiple values being passed",
"getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,",
"useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import inspect from inspect import",
"bkwargs) suitable for manipulation and passing to the specified function.",
"and varargs and use them in bargs bargs = [callargs[arg]",
"5, 4, 3, 2, 1, hello='there') ((5, 4, 3, 2,",
"unnecessary and can be achieved using features from PEP 362:",
"variable name for the function. # The reason is that",
"3.2, this should never be executed: if spec.kwonlydefaults is not",
"bkwargs.update({k: v for k, v in spec.kwonlydefaults.items() if k not",
"should never be executed: if spec.kwonlydefaults is not None: #",
"if spec.kwonlydefaults is not None: # pragma: no cover bkwargs.update({k:",
"the specified function. `bargs` consists of the bound args, varargs,",
"import getfullargspec except ImportError: # Python 2.X from collections import",
"call to the specified function. Any default parameter values are",
"= param.default return ba.args, ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs",
"Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import inspect from inspect",
"*args, **kwargs) spec = getfullargspec(func) # Construct all args and",
"getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults,",
"args and varargs and use them in bargs bargs =",
"bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match functionality",
"the specified function. Any default parameter values are included in",
"kwargs['b'] = 5 # overwrite default value for b >>>",
"(0, 1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys",
"5) ((5, 3), {}) >>> bindcallargs(func, 5, 4, 3, 2,",
"to the specified function. Any default parameter values are included",
"name. func = _fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs) spec",
"from inspect.getcallargs. For Python 3.3 and above, this module is",
"be achieved using features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example,",
"all default values for param in sig.parameters.values(): if param.name not",
">>> import inspect >>> def func(a, b=1, c=2, d=3): ...",
"spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_,",
"... >>> override_c(0, c=3) (0, 1, 10, 3) Also useful:",
"to the specified function. `bargs` consists of the bound args,",
"python 3.0, 3.1, 3.2, this should never be executed: if",
"not in bkwargs}) # Add in varkw. if spec.varkw is",
"sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) # Add in",
"= None annotations = getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs,",
"can be achieved using features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For",
"args, kwargs = bindcallargs(func, 5) >>> kwargs['b'] = 5 #",
"and kwargs from inspect.getcallargs. For Python 3.3 and above, this",
"c=2, d=3): ... return a, b, c, d ... >>>",
"return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_,",
"Add in all default values for param in sig.parameters.values(): if",
"3.3 and above, this module is unnecessary and can be",
"3.0, 3.1, 3.2, this should never be executed: if spec.kwonlydefaults",
"sys import inspect from inspect import getcallargs try: from inspect",
"sig = inspect.signature(override) ... ba = sig.bind(*args, **kwargs) ... ba['c']",
"10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import inspect",
"in varkw. if spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return bargs,",
"don't support python 3.0, 3.1, 3.2, this should never be",
"values are included in the output. Examples -------- >>> def",
"= bindcallargs(func, 5) >>> kwargs['b'] = 5 # overwrite default",
"is that any kwarg by the same name will cause",
"that any kwarg by the same name will cause a",
"sig.bind(*args, **kwargs) # Add in all default values for param",
"inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) # Add in all default",
"from inspect import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw,",
"this module is unnecessary and can be achieved using features",
"return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match",
"example, to override a parameter of some function: >>> import",
"bound args, varargs, and kwonlyargs from getfullargspec. `bkwargs` consists of",
"{'hello': 'there'}) >>> args, kwargs = bindcallargs(func, 5) >>> kwargs['b']",
"are included in the output. Examples -------- >>> def func(a,",
"def func(a, b=1, c=2, d=3): ... return a, b, c,",
"`bkwargs` consists of the bound varkw from getfullargspec. Both can",
"None: # pragma: no cover bkwargs.update({k: v for k, v",
"will cause a TypeError # due to multiple values being",
"param.name not in ba.arguments: ba.arguments[param.name] = param.default return ba.args, ba.kwargs",
"arguments and keyword arguments to a function or method. Returns",
"kwonlydefaults, annotations') def getfullargspec(f): args, varargs, varkw, defaults = getargspec(f)",
"kwargs = bindcallargs(func, 5) >>> kwargs['b'] = 5 # overwrite",
"bound varkw from getfullargspec. Both can be used in a",
"all args and varargs and use them in bargs bargs",
"bindcallargs(func, 5) ((5, 3), {}) >>> bindcallargs(func, 5, 4, 3,",
"and keyword arguments to a function or method. Returns a",
"features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to override a",
"ba = sig.bind(*args, **kwargs) # Add in all default values",
"Examples -------- >>> def func(a, b=3, *args, **kwargs): ... pass",
"# It is necessary to choose an unlikely variable name",
"is not None: # pragma: no cover bkwargs.update({k: v for",
"1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import",
"Python 3.3 and above, this module is unnecessary and can",
"not None: # pragma: no cover bkwargs.update({k: v for k,",
"for Python > 3.3. sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args,",
"annotations = getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs, varkw, defaults,",
"2.X from collections import namedtuple from inspect import getargspec FullArgSpec",
"function or method. Returns a tuple (bargs, bkwargs) suitable for",
"import sys import inspect from inspect import getcallargs try: from",
"kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments and keyword",
"TypeError # due to multiple values being passed for that",
"Python 2.X from collections import namedtuple from inspect import getargspec",
"passing to the specified function. `bargs` consists of the bound",
"3, 2, 1), {'hello': 'there'}) >>> args, kwargs = bindcallargs(func,",
"def override_c(*args, **kwargs): ... sig = inspect.signature(override) ... ba =",
">>> def func(a, b=1, c=2, d=3): ... return a, b,",
"in bargs bargs = [callargs[arg] for arg in spec.args] if",
"bargs bargs = [callargs[arg] for arg in spec.args] if spec.varargs",
"\"\"\" import sys import inspect from inspect import getcallargs try:",
"sig.bind(*args, **kwargs) ... ba['c'] = 10 ... return func(*ba.args, *ba.kwargs)",
"import namedtuple from inspect import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args,",
"= getargspec(f) kwonlyargs = [] kwonlydefaults = None annotations =",
"to override a parameter of some function: >>> import inspect",
"Both can be used in a call to the specified",
"parameter values are included in the output. Examples -------- >>>",
"getfullargspec except ImportError: # Python 2.X from collections import namedtuple",
"{}) >>> bindcallargs(func, 5, 4, 3, 2, 1, hello='there') ((5,",
"from getfullargspec. Both can be used in a call to",
"**kwargs) # Add in all default values for param in",
"unspecified kwonlyargs only. # Since keyword only arguements aren't allowed",
"Should match functionality of bindcallargs_32 for Python > 3.3. sig",
"b=3, *args, **kwargs): ... pass >>> bindcallargs(func, 5) ((5, 3),",
"override a parameter of some function: >>> import inspect >>>",
"bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments and keyword arguments to a",
"by the same name will cause a TypeError # due",
"and use them in bargs bargs = [callargs[arg] for arg",
"using features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to override",
"kwarg by the same name will cause a TypeError #",
"to a function or method. Returns a tuple (bargs, bkwargs)",
">>> override_c(0, c=3) (0, 1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/",
"not in ba.arguments: ba.arguments[param.name] = param.default return ba.args, ba.kwargs if",
"ba.args, ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32 else:",
"# The reason is that any kwarg by the same",
"Construct all args and varargs and use them in bargs",
"return a, b, c, d ... >>> def override_c(*args, **kwargs):",
"of bindcallargs_32 for Python > 3.3. sig = inspect.signature(_fUnCtIoN_) ba",
"getargspec(f) kwonlyargs = [] kwonlydefaults = None annotations = getattr(f,",
"Returns a tuple (bargs, bkwargs) suitable for manipulation and passing",
"arguements aren't allowed in python2, and we # don't support",
"match functionality of bindcallargs_32 for Python > 3.3. sig =",
"bargs = [callargs[arg] for arg in spec.args] if spec.varargs is",
"[callargs[arg] for arg in spec.args] if spec.varargs is not None:",
"c=3) (0, 1, 10, 3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import",
"d ... >>> def override_c(*args, **kwargs): ... sig = inspect.signature(override)",
"value for b >>> func(*args, **kwargs) \"\"\" # It is",
"= 10 ... return func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3)",
"k not in bkwargs}) # Add in varkw. if spec.varkw",
"in bkwargs}) # Add in varkw. if spec.varkw is not",
"3.3. sig = inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) # Add",
"override_c(*args, **kwargs): ... sig = inspect.signature(override) ... ba = sig.bind(*args,",
"can be used in a call to the specified function.",
"from inspect import getfullargspec except ImportError: # Python 2.X from",
"**kwargs): ... sig = inspect.signature(override) ... ba = sig.bind(*args, **kwargs)",
"= getattr(f, '__annotations__', {}) return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,",
"hello='there') ((5, 4, 3, 2, 1), {'hello': 'there'}) >>> args,",
"in python2, and we # don't support python 3.0, 3.1,",
"to choose an unlikely variable name for the function. #",
"if k not in bkwargs}) # Add in varkw. if",
"the function. # The reason is that any kwarg by",
"import inspect from inspect import getcallargs try: from inspect import",
"and kwonlyargs from getfullargspec. `bkwargs` consists of the bound varkw",
"4, 3, 2, 1, hello='there') ((5, 4, 3, 2, 1),",
"defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args, **kwargs): \"\"\"Binds arguments",
"from inspect import getcallargs try: from inspect import getfullargspec except",
"them in bargs bargs = [callargs[arg] for arg in spec.args]",
"reason is that any kwarg by the same name will",
"varkw, defaults, kwonlyargs, kwonlydefaults, annotations') def getfullargspec(f): args, varargs, varkw,",
"unlikely variable name for the function. # The reason is",
"ba['c'] = 10 ... return func(*ba.args, *ba.kwargs) ... >>> override_c(0,",
"spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start",
"'there'}) >>> args, kwargs = bindcallargs(func, 5) >>> kwargs['b'] =",
"sig.parameters.values(): if param.name not in ba.arguments: ba.arguments[param.name] = param.default return",
"bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} # Add",
"ba.arguments: ba.arguments[param.name] = param.default return ba.args, ba.kwargs if sys.version_info[0:2] <",
"Add in kwonlydefaults for unspecified kwonlyargs only. # Since keyword",
"= {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} # Add in",
"kwonlyarg in spec.kwonlyargs} # Add in kwonlydefaults for unspecified kwonlyargs",
"name will cause a TypeError # due to multiple values",
"((5, 3), {}) >>> bindcallargs(func, 5, 4, 3, 2, 1,",
"kwonlydefaults = None annotations = getattr(f, '__annotations__', {}) return FullArgSpec(args,",
"in spec.args] if spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs =",
"spec.args] if spec.varargs is not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs)",
"import inspect >>> def func(a, b=1, c=2, d=3): ... return",
"# don't support python 3.0, 3.1, 3.2, this should never",
"# Add in varkw. if spec.varkw is not None: bkwargs.update(callargs[spec.varkw])",
"specified function. `bargs` consists of the bound args, varargs, and",
"the bound varkw from getfullargspec. Both can be used in",
"keyword only arguements aren't allowed in python2, and we #",
"any kwarg by the same name will cause a TypeError",
"It is necessary to choose an unlikely variable name for",
"name for the function. # The reason is that any",
"def func(a, b=3, *args, **kwargs): ... pass >>> bindcallargs(func, 5)",
"for arg in spec.args] if spec.varargs is not None: bargs.extend(callargs[spec.varargs])",
"a call to the specified function. Any default parameter values",
"function: >>> import inspect >>> def func(a, b=1, c=2, d=3):",
"... sig = inspect.signature(override) ... ba = sig.bind(*args, **kwargs) ...",
"module is unnecessary and can be achieved using features from",
"bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match functionality of bindcallargs_32 for",
"varargs and use them in bargs bargs = [callargs[arg] for",
"kwonlyargs only. # Since keyword only arguements aren't allowed in",
"for k, v in spec.kwonlydefaults.items() if k not in bkwargs})",
"args, varargs, varkw, defaults = getargspec(f) kwonlyargs = [] kwonlydefaults",
"inspect.signature(override) ... ba = sig.bind(*args, **kwargs) ... ba['c'] = 10",
"Since keyword only arguements aren't allowed in python2, and we",
"3, 2, 1, hello='there') ((5, 4, 3, 2, 1), {'hello':",
"= _fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs) spec = getfullargspec(func)",
"to multiple values being passed for that argument name. func",
"kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} #",
"is not None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start with",
"func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3) (0, 1, 10, 3)",
"method. Returns a tuple (bargs, bkwargs) suitable for manipulation and",
"due to multiple values being passed for that argument name.",
"c, d ... >>> def override_c(*args, **kwargs): ... sig =",
"# Add in kwonlydefaults for unspecified kwonlyargs only. # Since",
"choose an unlikely variable name for the function. # The",
"getfullargspec. `bkwargs` consists of the bound varkw from getfullargspec. Both",
"spec = getfullargspec(func) # Construct all args and varargs and",
"... ba['c'] = 10 ... return func(*ba.args, *ba.kwargs) ... >>>",
"# Start with kwonlyargs. bkwargs = {kwonlyarg: callargs[kwonlyarg] for kwonlyarg",
"**kwargs) spec = getfullargspec(func) # Construct all args and varargs",
"# overwrite default value for b >>> func(*args, **kwargs) \"\"\"",
"Any default parameter values are included in the output. Examples",
"3) Also useful: http://www.python.org/dev/peps/pep-3102/ \"\"\" import sys import inspect from",
"# Construct all args and varargs and use them in",
"in kwonlydefaults for unspecified kwonlyargs only. # Since keyword only",
"support python 3.0, 3.1, 3.2, this should never be executed:",
"{kwonlyarg: callargs[kwonlyarg] for kwonlyarg in spec.kwonlyargs} # Add in kwonlydefaults",
"no cover bkwargs.update({k: v for k, v in spec.kwonlydefaults.items() if",
"function. Any default parameter values are included in the output.",
"\"\"\" # It is necessary to choose an unlikely variable",
"args, varargs, and kwonlyargs from getfullargspec. `bkwargs` consists of the",
"that argument name. func = _fUnCtIoN_ callargs = getcallargs(func, *args,",
">>> kwargs['b'] = 5 # overwrite default value for b",
"consists of the bound varkw from getfullargspec. Both can be",
"for unspecified kwonlyargs only. # Since keyword only arguements aren't",
"args and kwargs from inspect.getcallargs. For Python 3.3 and above,",
"**kwargs) \"\"\" # It is necessary to choose an unlikely",
"k, v in spec.kwonlydefaults.items() if k not in bkwargs}) #",
"necessary to choose an unlikely variable name for the function.",
"None: bargs.extend(callargs[spec.varargs]) bargs = tuple(bargs) # Start with kwonlyargs. bkwargs",
"varargs, varkw, defaults = getargspec(f) kwonlyargs = [] kwonlydefaults =",
"multiple values being passed for that argument name. func =",
"collections import namedtuple from inspect import getargspec FullArgSpec = namedtuple('FullArgSpec',",
"import getcallargs try: from inspect import getfullargspec except ImportError: #",
"inspect import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults,",
"manipulation and passing to the specified function. `bargs` consists of",
"argument name. func = _fUnCtIoN_ callargs = getcallargs(func, *args, **kwargs)",
"varkw. if spec.varkw is not None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs",
"overwrite default value for b >>> func(*args, **kwargs) \"\"\" #",
"return func(*ba.args, *ba.kwargs) ... >>> override_c(0, c=3) (0, 1, 10,",
"achieved using features from PEP 362: http://www.python.org/dev/peps/pep-0362/ For example, to",
"362: http://www.python.org/dev/peps/pep-0362/ For example, to override a parameter of some",
"getfullargspec(f): args, varargs, varkw, defaults = getargspec(f) kwonlyargs = []",
"= sig.bind(*args, **kwargs) # Add in all default values for",
"inspect.getcallargs. For Python 3.3 and above, this module is unnecessary",
">>> def override_c(*args, **kwargs): ... sig = inspect.signature(override) ... ba",
"= inspect.signature(_fUnCtIoN_) ba = sig.bind(*args, **kwargs) # Add in all",
"import getargspec FullArgSpec = namedtuple('FullArgSpec', 'args, varargs, varkw, defaults, kwonlyargs,",
"aren't allowed in python2, and we # don't support python",
"is unnecessary and can be achieved using features from PEP",
">>> bindcallargs(func, 5) ((5, 3), {}) >>> bindcallargs(func, 5, 4,",
"a, b, c, d ... >>> def override_c(*args, **kwargs): ...",
"func(a, b=3, *args, **kwargs): ... pass >>> bindcallargs(func, 5) ((5,",
"return ba.args, ba.kwargs if sys.version_info[0:2] < (3,3): bindcallargs = bindcallargs_leq32",
"being passed for that argument name. func = _fUnCtIoN_ callargs",
"inspect >>> def func(a, b=1, c=2, d=3): ... return a,",
"default parameter values are included in the output. Examples --------",
"default value for b >>> func(*args, **kwargs) \"\"\" # It",
"func(*args, **kwargs) \"\"\" # It is necessary to choose an",
"the same name will cause a TypeError # due to",
"only arguements aren't allowed in python2, and we # don't",
"spec.kwonlyargs} # Add in kwonlydefaults for unspecified kwonlyargs only. #",
"pragma: no cover bkwargs.update({k: v for k, v in spec.kwonlydefaults.items()",
"def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): # Should match functionality of bindcallargs_32",
"for b >>> func(*args, **kwargs) \"\"\" # It is necessary",
">>> args, kwargs = bindcallargs(func, 5) >>> kwargs['b'] = 5",
"never be executed: if spec.kwonlydefaults is not None: # pragma:",
"FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) def bindcallargs_leq32(_fUnCtIoN_, *args,",
"= inspect.signature(override) ... ba = sig.bind(*args, **kwargs) ... ba['c'] =",
"None: bkwargs.update(callargs[spec.varkw]) return bargs, bkwargs def bindcallargs_geq33(_fUnCtIoN_, *args, **kwargs): #"
] |
[
"GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time()",
"launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" %",
"float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by,",
"value: return value except (NoSuchElementException, StaleElementException): pass time.sleep(0.5) else: raise",
"# Switch off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore",
"Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if not self.apps.is_app_installed(mk['name']): # install",
"def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\"",
"True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return self.marionette.execute_script(\"return",
"now process checkpoint data into .json output self.process_checkpoint_data() def checkpoint(self):",
"def media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result",
"in files if filename.endswith(extension)] return files def send_sms(self, number, message):",
"url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert",
"@property def is_online(self): # Returns true if the device has",
"filename.endswith(extension)] return files def send_sms(self, number, message): import json number",
"active_telephony_state(self): # Returns the state of only the currently active",
"the average b2g_rss total = 0 for b2g_mem_value in b2g_rss_list:",
"enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True)",
"# This is an effortless way to give extra debugging",
"def set_volume(self, value): channels = ['alarm', 'content', 'notification'] for channel",
"integer pref, which is different from a Gaia setting.\"\"\" return",
"+ 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !== undefined') @property def",
"self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi def push_file(self,",
"self.marionette.find_element(by, locator) except NoSuchElementException: pass else: raise TimeoutException( 'Element %s",
"self.start_b2g() def start_b2g(self): if self.marionette.instance: # launch the gecko instance",
"locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator = ('css",
"self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return",
"Now drive the actual test case iterations for count in",
"sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame()",
"} if not self.apps.is_app_installed(mk['name']): # install the marketplace dev app",
"False) # Set do not track pref back to the",
"from marionette.errors import StaleElementException from marionette.errors import InvalidResponseException import mozdevice",
"if not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online",
"TimeoutException( 'Element %s still present after timeout' % locator) def",
"at http://mozilla.org/MPL/2.0/. import json import os import sys import time",
"to give the device some idle time\" % idle_time) time.sleep(idle_time)",
"to lock screen' def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert",
"established (cell data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def",
"timeout = float(timeout) + time.time() e = None while time.time()",
"special_powers=True) assert result, 'Unable to insert contact %s' % contact",
"disable carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi:",
"app.frame_id is None: raise Exception(\"App failed to launch; there is",
"% dm_type) return self._manager @property def is_android_build(self): if self.testvars.get('is_android_build') is",
"in self.data_layer.media_files: # filename is a fully qualified path self.device.manager.removeFile(filename)",
"def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return",
".close-card' % self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two) close_card_app_button =",
"assert network, 'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result =",
"if (arguments[0] === arguments[1]) { marionetteScriptFinished(); } else { var",
"checkpoint data into .json output self.process_checkpoint_data() def checkpoint(self): # Console",
"self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if",
"Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove media",
"return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\",",
"raise TimeoutException(message) def is_element_present(self, by, locator): try: self.marionette.find_element(by, locator) return",
"&& ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !== undefined')",
"filename in self.data_layer.media_files: # filename is a fully qualified path",
"*args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart:",
"% self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator)",
"disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable",
"by using the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring",
"before timeout' % locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout",
"def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled')",
"%d seconds to give the device some idle time\" %",
"file if it doesn't exist already if self.iteration in (0,",
"seconds to give the device some idle time\" % idle_time)",
"seconds for the wait_for methods _default_timeout = 30 def __init__(self,",
"= \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as",
"GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True)",
"on while test is running if self.iteration == 1: print",
"summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n'",
"clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can be",
"self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name)",
"console so can see what iteration we're on while test",
"result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\",",
"not present before timeout' % locator) else: raise TimeoutException('Element %s",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable to",
"= json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name, value), special_powers=True) def",
"%d\" % (self.test_method.__name__, count, self.iterations)) # Print to console so",
"mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && ' +",
"self._manager @property def is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] =",
"'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def",
"test, app): self.test_method = test self.app_under_test = app # Now",
"special_powers=True) assert result, 'Unable to disable WiFi' def connect_to_wifi(self, network=None):",
"\"\"\"Sets the value of a Gecko boolean pref, which is",
"count, '/'.join(['sdcard', destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename))",
"Close the summary file summary_file.close() # Write to suite summary",
"[] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame() return",
"setting.\"\"\" return self._get_pref('Char', name) def set_char_pref(self, name, value): \"\"\"Sets the",
"GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp()",
"If a copy of the MPL was not distributed with",
"channels: self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return",
"< timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException: break else:",
"**kwargs) def drive(self, test, app): self.test_method = test self.app_under_test =",
"=== expected; } ); }; console.log(\"Changing orientation to '\" +",
"up the cards view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\")",
"@property def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'),",
"self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def",
"def set_setting(self, name, value): import json value = json.dumps(value) result",
"device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result",
"'return mobileConnection !== undefined') @property def has_wifi(self): if not hasattr(self,",
"(0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755)",
"print \"Checkpoint...\" sys.stdout.flush() # Sleep to give device idle time",
"restore settings from testvars [self.data_layer.set_setting(name, value) for name, value in",
"# disable passcode before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111')",
"if value: return value except (NoSuchElementException, StaleElementException): pass time.sleep(0.5) else:",
"self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable",
"'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5) #",
"self.marionette.switch_to_frame(app_frame) start = time.time() if not url: def check(now): return",
"name) def set_bool_pref(self, name, value): \"\"\"Sets the value of a",
"% (self.test_method.__name__, count, self.iterations)) # Print to console so can",
"name=result.get('name'), origin=result.get('origin')) if app.frame_id is None: raise Exception(\"App failed to",
"window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self):",
"def __init__(self, *args, **kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None)",
"setting.\"\"\" return self._set_pref('Bool', name, value) def get_int_pref(self, name): \"\"\"Returns the",
"return files def send_sms(self, number, message): import json number =",
"% self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower())",
"recipient %s with text %s' % (number, message) class GaiaDevice(object):",
"i in range(1, count + 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) %",
"= json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message),",
"result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable to enable",
"is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()')",
"% locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout)",
"{} @property def manager(self): if hasattr(self, '_manager') and self._manager: return",
"a Gaia setting.\"\"\" return self._set_pref('Bool', name, value) def get_int_pref(self, name):",
"return False def tearDown(self): self.lockscreen = None self.apps = None",
"from a Gaia setting.\"\"\" return self._set_pref('Bool', name, value) def get_int_pref(self,",
"sys.stdout.flush() self.test_method() # Checkpoint time? if ((count % self.checkpoint_interval) ==",
"self.iteration, self.iterations)) log_file.write('%s\\n' % output_str) def close_app(self): # Close the",
"assert result, 'Unable to unlock screen' class GaiaApp(object): def __init__(self,",
"(self.test_method.__name__, count, self.iterations)) # Print to console so can see",
"the gecko instance attached to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start',",
"'content', 'notification'] for channel in channels: self.set_setting('audio.volume.%s' % channel, value)",
"completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk =",
"contact %s' % contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000",
"Event('home'));\") # Bring up the cards view _cards_view_locator = ('id',",
"runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame)",
"some memory status info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) #",
"class GaiaApp(object): def __init__(self, origin=None, name=None, frame=None, src=None): self.frame =",
"summary_file = open(summary_name, 'w') # Write the summarized checkpoint data",
"while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection ||",
"is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency')",
"def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result,",
"_yes_button_locator = (By.ID, 'app-install-install-button') mk = {\"name\": \"Marketplace Dev\", \"manifest\":",
"if not destination.count('.') > 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination)",
"orientation), landscape-primary, portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] ===",
"connect to cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return",
"happening if watching console print \"Checkpoint...\" sys.stdout.flush() # Sleep to",
"time.sleep(idle_time) # Dump out some memory status info self.marionette.log(\"checkpoint\") self.cur_time",
"window.screen.mozOrientation === expected; } ); }; console.log(\"Changing orientation to '\"",
"information if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not present before",
"1): self.iteration = count self.marionette.log(\"%s iteration %d of %d\" %",
"call log needs to be open and focused in order",
"self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock screen' class GaiaApp(object): def",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock screen'",
"destination, 'of=%s' % remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self):",
"result, 'Unable to lock screen' def unlock(self): self.marionette.switch_to_frame() result =",
"@property def has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return",
"**kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self,",
"we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not",
"to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self,",
"self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name)",
"to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \"",
"elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to stop B2G')",
"the system app object when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap()",
"marionette, testvars=None): self.marionette = marionette self.testvars = testvars or {}",
"= LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from",
"frame self.frame_id = frame self.src = src self.name = name",
"next_line in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint",
"self.apps.is_app_installed(mk['name']): # install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest'])",
"disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures pref",
"is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return",
"if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend',",
"# Process checkpoint data into .json self.marionette.log(\"processing checkpoint data from",
"special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def",
"it doesn't exist already if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path",
"def is_online(self): # Returns true if the device has a",
"False) # restore settings from testvars [self.data_layer.set_setting(name, value) for name,",
"is only available for devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if",
"special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call log needs to be open",
"js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self,",
"'a') as log_file: log_file.write('%s Checkpoint after iteration %d of %d:\\n'",
"%s still present after timeout' % locator) def wait_for_element_displayed(self, by,",
"value is not False.\"\"\" end_time = time.time() + timeout while",
"def check(now): return \"about:blank\" not in now else: def check(now):",
"mk = {\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if",
"the cards view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator)",
"is different from a Gaia setting.\"\"\" return self._get_pref('Bool', name) def",
"self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first checkpoint, create",
"time.time() while time.time() < timeout: time.sleep(0.5) try: if not self.marionette.find_element(by,",
"time.time() if not url: def check(now): return \"about:blank\" not in",
"!= -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000) #",
"= window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0];",
"self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \" + \"if(telephony.active)",
"with name '%s'\" % app.name def kill_all(self): self.marionette.switch_to_frame() js =",
"some idle time\" % idle_time) time.sleep(idle_time) # Dump out some",
"+ 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled',",
"this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import",
"files if filename.endswith(extension)] return files def send_sms(self, number, message): import",
"name to %s\" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable):",
"\"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\" %",
"to keep all tests passing while introducing multi-sim APIs return",
"self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1 ||",
"*args, **kwargs) def drive(self, test, app): self.test_method = test self.app_under_test",
"message=\"Condition timed out\"): \"\"\"Calls the method provided with the driver",
"import TimeoutException from marionette.errors import StaleElementException from marionette.errors import InvalidResponseException",
"network, 'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return",
"(self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as log_file:",
"return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True) def _set_pref(self, datatype,",
"self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def",
"open and focused in order for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();')",
"}; console.log(\"Changing orientation to '\" + arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]);",
"self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): # XXX: check bug-926169 #",
"%d:\\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' % output_str) def close_app(self):",
"e = None while time.time() < timeout: time.sleep(0.5) try: if",
"before timeout' % locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout",
"if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if not",
"def start_b2g(self): if self.marionette.instance: # launch the gecko instance attached",
"+ \"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();')",
"resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation): \"\"\"",
"% (name, value) def _get_pref(self, datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\"",
"});\"\"\", script_timeout=60000) # TODO: Remove this sleep when Bug 924912",
"% self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list))",
"def tearDown(self): self.lockscreen = None self.apps = None self.data_layer =",
"self.testvars.get('wifi') assert network, 'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result",
"self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test, app): self.test_method =",
"screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation')",
"ElementNotVisibleException): return False def tearDown(self): self.lockscreen = None self.apps =",
"timeout' % locator) else: raise TimeoutException('Element %s present but not",
"self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home",
"date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame()",
"%s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n' %",
"timeout' % locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout =",
"self.test_method() # Checkpoint time? if ((count % self.checkpoint_interval) == 0)",
"@property def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result",
"the summarized checkpoint data summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n'",
"test case iterations for count in range(1, self.iterations + 1):",
"False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data connection if self.device.has_mobile_connection:",
"kill app with name '%s'\" % app.name def kill_all(self): self.marionette.switch_to_frame()",
"kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\")",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert result, \"Failed",
"\"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self):",
"the state of only the currently active call or None",
"sys.stdout.flush() # Sleep to give device idle time (for gc)",
"return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property",
"else { var expected = arguments[1]; window.screen.onmozorientationchange = function(e) {",
"host: raise Exception('Must specify host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host)",
"!== undefined') @property def has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi",
"assert self.device.is_online def connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi') and",
"the currently active call or None if no active call",
"Dump out some memory status info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\",",
"'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result,",
"if hasattr(self, '_manager') and self._manager: return self._manager if not self.is_android_build:",
"_get_pref(self, datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True)",
"aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000)",
"forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network = network",
"Checkpoint after iteration %d of %d:\\n' % (self.cur_time, self.iteration, self.iterations))",
"(datatype, name), special_powers=True) def _set_pref(self, datatype, name, value): value =",
"not displayed before timeout' % locator) def wait_for_element_not_displayed(self, by, locator,",
"def set_bool_pref(self, name, value): \"\"\"Sets the value of a Gecko",
"TimeoutException( 'Element %s not present before timeout' % locator) def",
"def close_app(self): # Close the current app (self.app) by using",
"'Unable to disable WiFi' def connect_to_wifi(self, network=None): network = network",
"after timeout' % locator) def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed",
"json number = json.dumps(number) message = json.dumps(message) result = self.marionette.execute_async_script('return",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable to",
"elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to start B2G')",
"state of only the currently active call or None if",
"'-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio, disable Airplane",
"else: raise Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build:",
"def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time()",
"{ return window.screen.mozOrientation === expected; } ); }; console.log(\"Changing orientation",
"setting.\"\"\" return self._set_pref('Char', name, value) def set_volume(self, value): channels =",
"self.device.stop_b2g() if self.device.is_android_build: # revert device to a clean state",
"'Unable to connect to cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result",
"self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content()",
"Checkpoint time? if ((count % self.checkpoint_interval) == 0) or count",
"= self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable to connect to",
"= GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette)",
"# Close the checkpoint file checkpoint_file.close() # Calculate the average",
"know what's happening if watching console print \"Checkpoint...\" sys.stdout.flush() #",
"the setUp if not self.restart: # disable passcode before restore",
"pref, which is different from a Gaia setting.\"\"\" return self._set_pref('Char',",
"provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert",
"= os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name):",
"result, 'Unable to insert contact %s' % contact def remove_all_contacts(self,",
"# disable carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if",
"self.device.is_online else: raise Exception('Unable to connect to local area network')",
"License, v. 2.0. If a copy of the MPL was",
"marionette.errors import TimeoutException from marionette.errors import StaleElementException from marionette.errors import",
"marionetteScriptFinished(); } });\"\"\", script_timeout=60000) # TODO: Remove this sleep when",
"%s\\n' % (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a')",
"self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value), special_powers=True) assert result, \"Unable",
"= count self.marionette.log(\"%s iteration %d of %d\" % (self.test_method.__name__, count,",
"device bluetooth discoverable mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\")",
"TimeoutException('Element %s present but not displayed before timeout' % locator)",
"src=None): self.frame = frame self.frame_id = frame self.src = src",
"number = json.dumps(number) message = json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s,",
"= \"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\"",
"function() { return window.screen.mozOrientation === expected; } ); }; console.log(\"Changing",
"return False def is_element_displayed(self, by, locator): try: return self.marionette.find_element(by, locator).is_displayed()",
"You can obtain one at http://mozilla.org/MPL/2.0/. import json import os",
"pass self.device = GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build or",
"not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return",
"checkpoint_file.close() # Calculate the average b2g_rss total = 0 for",
"self.marionette.session = None self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout =",
"0 for b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value) avg_rss =",
"# close the gecko instance attached to marionette self.marionette.instance.close() elif",
"self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable to",
"# XXX: check bug-926169 # this is used to keep",
"waitFor( function() { window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function() {",
"order for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony",
"console print \"Checkpoint...\" sys.stdout.flush() # Sleep to give device idle",
"pass else: raise TimeoutException( 'Element %s not present before timeout'",
"app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self, name): self.marionette.switch_to_frame()",
"def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def",
"now while (time.time() - start < timeout): if check(self.marionette.get_url()): return",
"GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result, 'Unable to connect to WiFi",
"is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform']",
"None while time.time() < timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed():",
"first checkpoint, create the file if it doesn't exist already",
"1: for i in range(1, count + 1): remote_copy =",
"marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\"))",
"{} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def",
"remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return",
"timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException: break else: raise",
"kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def",
"self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator =",
"self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) #",
"}, function() { return window.screen.mozOrientation === expected; } ); };",
"% locator) else: raise TimeoutException('Element %s present but not displayed",
"using the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up",
"break else: raise TimeoutException( 'Element %s still visible after timeout'",
"used to keep all tests passing while introducing multi-sim APIs",
"NoSuchElementException: break else: raise TimeoutException( 'Element %s still present after",
"self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result,",
"result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to remove",
"self._manager if not self.is_android_build: raise Exception('Device manager is only available",
"value): import json value = json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\",",
"value of a Gecko boolean pref, which is different from",
"'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def",
"insert contact %s' % contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout,",
"b2g rss reading for each checkpoint b2g_rss_list = [] for",
"a Gaia setting.\"\"\" return self._get_pref('Char', name) def set_char_pref(self, name, value):",
"close icon for the current app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"]",
"kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except",
"manager type: %s' % dm_type) return self._manager @property def is_android_build(self):",
"summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name,",
"has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !==",
"name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True) def _set_pref(self,",
"process_checkpoint_data(self): # Process checkpoint data into .json self.marionette.log(\"processing checkpoint data",
"filename)) def change_orientation(self, orientation): \"\"\" There are 4 orientation states",
"ElementNotVisibleException from marionette.errors import TimeoutException from marionette.errors import StaleElementException from",
"return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call log needs",
"def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls the method",
"% json.dumps(network)) assert result, 'Unable to connect to WiFi network'",
"and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert device",
"special_powers=True) assert result, 'Unable to connect to cell data' def",
"while time.time() < timeout: time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed():",
"< timeout): if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not switch",
"special_powers=True) assert result, 'Unable to disable cell data' @property def",
"# Write the summarized checkpoint data summary_file.write('test_name: %s\\n' % self.test_method.__name__)",
"locator): try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False",
"get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name))",
"to connect to network') assert self.device.is_online def connect_to_local_area_network(self): if not",
"(for gc) idle_time = 30 self.marionette.log(\"sleeping %d seconds to give",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call log",
"fully qualified path self.device.manager.removeFile(filename) # Switch off keyboard FTU screen",
"in the setUp if not self.restart: # disable passcode before",
"self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect",
"{ window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function() { return window.screen.mozOrientation",
"= testvars or {} @property def manager(self): if hasattr(self, '_manager')",
"{\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if not self.apps.is_app_installed(mk['name']):",
"timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator) except NoSuchElementException: pass else:",
"var expected = arguments[1]; window.screen.onmozorientationchange = function(e) { console.log(\"Received 'onmozorientationchange'",
"keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do not track",
"= time.time() + timeout while time.time() < end_time: try: value",
"# TODO: Remove this sleep when Bug 924912 is addressed",
"app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator =",
"'app-install-install-button') mk = {\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", }",
"@property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result =",
"= total / len(b2g_rss_list) # Create a summary text file",
"def delete_all_call_log_entries(self): \"\"\"The call log needs to be open and",
"function(e) { console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function() { window.screen.onmozorientationchange =",
"destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self,",
"summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ')",
"'%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\\n' %",
"def set_int_pref(self, name, value): \"\"\"Sets the value of a Gecko",
"lock screen' def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result,",
"self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first checkpoint, create the",
"to connect to local area network') def push_resource(self, filename, count=1,",
"self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect to network') assert",
"% (datatype, name, value), special_powers=True) def get_bool_pref(self, name): \"\"\"Returns the",
"GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if",
"% app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def",
"def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars = testvars",
"can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps",
"'dd', 'if=%s' % destination, 'of=%s' % remote_copy]) if progress: progress.update(i)",
"self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy]) if progress:",
"data' @property def is_cell_data_connected(self): # XXX: check bug-926169 # this",
"set the device bluetooth discoverable mode' @property def bluetooth_is_enabled(self): return",
"are 4 orientation states which the phone can be passed",
"else: raise Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session = None",
"present before timeout' % locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout):",
"} ); }; console.log(\"Changing orientation to '\" + arguments[1] +",
"self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect to network') assert self.device.is_online",
"current app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator",
"return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def",
"send_sms(self, number, message): import json number = json.dumps(number) message =",
"keep all tests passing while introducing multi-sim APIs return self.marionette.execute_script('var",
"other.__dict__ class GaiaApps(object): def __init__(self, marionette): self.marionette = marionette js",
"message) class GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette = marionette",
"Calculate the average b2g_rss total = 0 for b2g_mem_value in",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock screen'",
"into .json output self.process_checkpoint_data() def checkpoint(self): # Console output so",
"= GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g()",
"= json.dumps(number) message = json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)'",
"still present after timeout' % locator) def wait_for_element_displayed(self, by, locator,",
"**kwargs): self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or",
"def sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension):",
"timeout in seconds for the wait_for methods _default_timeout = 30",
"def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk = {\"name\": \"Marketplace",
"os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\"",
"# filename is a fully qualified path self.device.manager.removeFile(filename) # Switch",
"name '%s'\" % name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin'))",
"the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the device",
"frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property def displayed_app(self):",
"to unlock screen' class GaiaApp(object): def __init__(self, origin=None, name=None, frame=None,",
"> 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if",
"'%s')\" % (app_name, permission_name)) def set_permission(self, app_name, permission_name, value): return",
"except (NoSuchElementException, StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self,",
"self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set the device bluetooth",
"def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return",
"os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with",
"False.\"\"\" end_time = time.time() + timeout while time.time() < end_time:",
"next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file checkpoint_file.close() # Calculate",
"False def is_element_displayed(self, by, locator): try: return self.marionette.find_element(by, locator).is_displayed() except",
"if the device has a network connection established (cell data,",
"# Re-set edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False) #",
"self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__,",
"name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self, app): self.marionette.switch_to_frame() js",
"return [filename for filename in files if filename.endswith(extension)] return files",
"if not self.restart: # disable passcode before restore settings from",
"to give device idle time (for gc) idle_time = 30",
"b2g_rss_list = [] for next_line in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5])",
"# install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) #",
"local area network') def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count,",
"%s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n' %",
"file summary_name = self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w') #",
"time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as",
"progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self):",
"data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks()",
"_close_button_locator = ('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def",
"from gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self):",
"idle_time) time.sleep(idle_time) # Dump out some memory status info self.marionette.log(\"checkpoint\")",
"self._manager: return self._manager if not self.is_android_build: raise Exception('Device manager is",
"landscape-primary, portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1])",
"for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony =",
"%s present but not displayed before timeout' % locator) def",
"app_frame) class GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette = marionette",
"By from marionette.errors import NoSuchElementException from marionette.errors import ElementNotVisibleException from",
"url: def check(now): return \"about:blank\" not in now else: def",
"def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property",
"assert result, 'Unable to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result",
"contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result",
"if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as e: pass",
"currently active call or None if no active call return",
"result, 'Unable to connect to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame()",
"summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) # Close",
"import InvalidResponseException import mozdevice class LockScreen(object): def __init__(self, marionette): self.marionette",
"of a Gecko boolean pref, which is different from a",
"% (name, value), special_powers=True) assert result, \"Unable to change setting",
"self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5) # Tap the close",
"raise Exception('Unable to connect to network') assert self.device.is_online def connect_to_local_area_network(self):",
"b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file checkpoint_file.close() # Calculate the",
"None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations =",
"all_settings(self): return self.get_setting('*') def set_setting(self, name, value): import json value",
"= marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property",
"self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files)",
"open(self.log_name, 'a') as log_file: log_file.write('%s Gaia Endurance Test: %s\\n' %",
"time from marionette import MarionetteTestCase from marionette.by import By from",
"self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name, permission_name, value)) def launch(self,",
"self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout)",
"the MPL was not distributed with this # file, You",
"value): \"\"\"Sets the value of a Gecko boolean pref, which",
"telephony.active.hangUp();\") @property def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def",
"give device idle time (for gc) idle_time = 30 self.marionette.log(\"sleeping",
"def check(now): return url in now while (time.time() - start",
"check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not switch to app frame",
"return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" %",
"work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \" +",
"as an argument until the \\ return value is not",
"app): self.test_method = test self.app_under_test = app # Now drive",
"GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if",
"as log_file: log_file.write('%s Gaia Endurance Test: %s\\n' % (self.cur_time, self.test_method.__name__))",
"% locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout)",
"% app.name def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms',",
"GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout)",
"def is_wifi_connected(self, network=None): network = network or self.testvars.get('wifi') assert network,",
"with text %s' % (number, message) class GaiaDevice(object): def __init__(self,",
"Switch off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings",
"+ timeout while time.time() < end_time: try: value = method(self.marionette)",
"was not distributed with this # file, You can obtain",
"button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up the cards view",
"contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert",
"the gecko instance attached to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop',",
"from a Gaia setting.\"\"\" return self._set_pref('Char', name, value) def set_volume(self,",
"result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True) assert",
"needs to be open and focused in order for this",
"avg_rss) # Close the summary file summary_file.close() # Write to",
"boolean pref, which is different from a Gaia setting.\"\"\" return",
"'resources', filename)) def change_orientation(self, orientation): \"\"\" There are 4 orientation",
"testvars [self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()] #",
"self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start =",
"# remove media if self.device.is_android_build: for filename in self.data_layer.media_files: #",
"timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls the method provided with the",
"locator) else: raise TimeoutException('Element %s present but not displayed before",
"set_volume(self, value): channels = ['alarm', 'content', 'notification'] for channel in",
"self.switch_to_frame(app.frame_id, url) return app @property def displayed_app(self): self.marionette.switch_to_frame() result =",
"to stop B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window = None",
"launch; there is no app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url)",
"Re-set edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable",
"name '%s' to '%s'\" % (name, value) def _get_pref(self, datatype,",
"GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);'",
"} });\"\"\", script_timeout=60000) # TODO: Remove this sleep when Bug",
"%d of %d:\\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' % output_str)",
"self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout =",
"None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException:",
"all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame()",
"Close the checkpoint file checkpoint_file.close() # Calculate the average b2g_rss",
"count + 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd',",
"def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time()",
"@property def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self):",
"self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()')",
"self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass except NoSuchElementException: break else:",
"displayed before timeout' % locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout):",
"value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name, value),",
"_search_timeout = 10000 # deafult timeout in seconds for the",
"%s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' %",
"if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable",
"self.restart: # disable passcode before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code',",
"= 30 def __init__(self, *args, **kwargs): self.restart = kwargs.pop('restart', False)",
"can obtain one at http://mozilla.org/MPL/2.0/. import json import os import",
"'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return",
"60000 _search_timeout = 10000 # deafult timeout in seconds for",
"def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'),",
"break except (NoSuchElementException, StaleElementException) as e: pass else: # This",
"def insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact),",
"of %d\" % (self.test_method.__name__, count, self.iterations)) # Print to console",
"self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout)",
"connect to network') assert self.device.is_online def connect_to_local_area_network(self): if not self.device.is_online:",
"self.testvars['is_android_build'] @property def is_online(self): # Returns true if the device",
"disable WiFi' def connect_to_wifi(self, network=None): network = network or self.testvars.get('wifi')",
"= self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result, 'Unable to connect",
"for i in range(1, count + 1): remote_copy = '_%s.'.join(iter(destination.split('.')))",
"locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time() <",
"unlock self.lockscreen.unlock() # If we are restarting all of these",
"name, value) def get_int_pref(self, name): \"\"\"Returns the value of a",
"except InvalidResponseException: if self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars) if",
"Exception('Must specify host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise",
"< timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator) except NoSuchElementException: pass",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin'))",
"gc) idle_time = 30 self.marionette.log(\"sleeping %d seconds to give the",
"try: MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart: pass self.device = GaiaDevice(self.marionette,",
"window.screen.onmozorientationchange = function(e) { console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function() {",
"!== undefined') return self._has_wifi def push_file(self, source, count=1, destination='', progress=None):",
"GaiaApp(object): def __init__(self, origin=None, name=None, frame=None, src=None): self.frame = frame",
"app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is None:",
"= kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test,",
"WiFi' def connect_to_wifi(self, network=None): network = network or self.testvars.get('wifi') assert",
"def set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\"",
"a Gecko string pref, which is different from a Gaia",
"network or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.enable_wifi()",
"value of a Gecko integer pref, which is different from",
"with name '%s'\" % name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'),",
"from marionette.by import By from marionette.errors import NoSuchElementException from marionette.errors",
"def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return",
"to set the device bluetooth discoverable mode' @property def bluetooth_is_enabled(self):",
"= (By.ID, 'app-install-install-button') mk = {\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp",
"result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert",
"settings from testvars [self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings',",
"def all_settings(self): return self.get_setting('*') def set_setting(self, name, value): import json",
"class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout = 10000 # deafult",
"10000 # deafult timeout in seconds for the wait_for methods",
"permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name)) def set_permission(self,",
"which is different from a Gaia setting.\"\"\" return self._set_pref('Bool', name,",
"30 def __init__(self, *args, **kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations',",
"data into .json self.marionette.log(\"processing checkpoint data from %s\" % self.log_name)",
"os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin)",
"def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def",
"@property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def",
"import MarionetteTestCase from marionette.by import By from marionette.errors import NoSuchElementException",
"name, value) def set_volume(self, value): channels = ['alarm', 'content', 'notification']",
"'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s',",
"@property def is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android'",
"(discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);')",
"return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return",
"self._has_wifi def push_file(self, source, count=1, destination='', progress=None): if not destination.count('.')",
"app.name def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\"))",
"time.localtime()) # If first checkpoint, create the file if it",
"= kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self,",
"remove media if self.device.is_android_build: for filename in self.data_layer.media_files: # filename",
"self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data()",
"to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\",",
"json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True)",
"StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self, by, locator):",
"an effortless way to give extra debugging information if isinstance(e,",
"default orientation), landscape-primary, portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0]",
"GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result, 'Unable to insert contact",
"file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a')",
"@property def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''):",
"window.navigator.mozWifiManager !== undefined') return self._has_wifi def push_file(self, source, count=1, destination='',",
"self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to start B2G') self.marionette.wait_for_port()",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")'",
"marionette.errors import InvalidResponseException import mozdevice class LockScreen(object): def __init__(self, marionette):",
"other): return self.__dict__ == other.__dict__ class GaiaApps(object): def __init__(self, marionette):",
"return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result =",
"total = 0 for b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value)",
"= marionette self.testvars = testvars or {} @property def manager(self):",
"self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network() except: if",
"in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file",
"or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert device to a",
"# TODO add this to the system app object when",
"bluetooth discoverable mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property",
"= mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager type: %s' %",
"NoSuchElementException from marionette.errors import ElementNotVisibleException from marionette.errors import TimeoutException from",
"wait_for methods _default_timeout = 30 def __init__(self, *args, **kwargs): self.restart",
"special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' %",
"network') assert self.device.is_online def connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi')",
"assert result, \"Unable to change setting with name '%s' to",
"manager is only available for devices.') dm_type = os.environ.get('DM_TRANS', 'adb')",
"% channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def",
"def push_file(self, source, count=1, destination='', progress=None): if not destination.count('.') >",
"assert result, \"Failed to kill app with name '%s'\" %",
"drive the actual test case iterations for count in range(1,",
"# Sleep a bit time.sleep(5) # Tap the close icon",
"create the file if it doesn't exist already if self.iteration",
"None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self)",
"% (count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time? if ((count",
"os import sys import time from marionette import MarionetteTestCase from",
"count=1, destination='', progress=None): if not destination.count('.') > 0: destination =",
"locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as e: pass else: #",
"def _get_pref(self, datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name),",
"**kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart: pass",
"(app_name, permission_name)) def set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s',",
"value)) def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result =",
"'Element %s still visible after timeout' % locator) def wait_for_condition(self,",
"\"\"\"The call log needs to be open and focused in",
"= self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set the device",
"active call or None if no active call return self.marionette.execute_script(\"return",
"*args, **kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None)",
"cleanUp(self): # remove media if self.device.is_android_build: for filename in self.data_layer.media_files:",
"marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO add this",
"Sleep a bit time.sleep(5) # Tap the close icon for",
"def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result =",
"SMS to recipient %s with text %s' % (number, message)",
"active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return self.marionette.execute_script('return",
"\"en-US\") # Switch off spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False)",
"Exception('Unable to connect to local area network') def push_resource(self, filename,",
".json output self.process_checkpoint_data() def checkpoint(self): # Console output so know",
"- start < timeout): if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could",
"marionette import MarionetteTestCase from marionette.by import By from marionette.errors import",
"'%s', '%s')\" % (app_name, permission_name, value)) def launch(self, name, switch_to_frame=True,",
"return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections &&",
"emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette)",
"self.iterations)) log_file.write('%s\\n' % output_str) def close_app(self): # Close the current",
"different from a Gaia setting.\"\"\" return self._get_pref('Int', name) def set_int_pref(self,",
"@property def manager(self): if hasattr(self, '_manager') and self._manager: return self._manager",
"by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() e =",
"self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network()",
"self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && '",
"self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations",
"to the system app object when we have one self.wait_for_element_displayed(*_yes_button_locator)",
"return self._set_pref('Bool', name, value) def get_int_pref(self, name): \"\"\"Returns the value",
"displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'),",
"return self.marionette.find_element(by, locator) except NoSuchElementException: pass else: raise TimeoutException( 'Element",
"self.device.start_b2g() # the emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout)",
"return self._get_pref('Int', name) def set_int_pref(self, name, value): \"\"\"Sets the value",
"def stop_b2g(self): if self.marionette.instance: # close the gecko instance attached",
"{ var expected = arguments[1]; window.screen.onmozorientationchange = function(e) { console.log(\"Received",
"'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def",
"name '%s'\" % app.name def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__,",
"return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result,",
"self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert",
"script_timeout=60000) # TODO: Remove this sleep when Bug 924912 is",
"{}).items()] # unlock self.lockscreen.unlock() # If we are restarting all",
"to change setting with name '%s' to '%s'\" % (name,",
"timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException)",
"'adb') if dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type",
"if app.frame_id is None: raise Exception(\"App failed to launch; there",
"in now while (time.time() - start < timeout): if check(self.marionette.get_url()):",
"= self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to remove all",
"self._set_pref('Bool', name, value) def get_int_pref(self, name): \"\"\"Returns the value of",
"marionetteScriptFinished(); } else { var expected = arguments[1]; window.screen.onmozorientationchange =",
"json.dumps(network)) assert result, 'Unable to connect to WiFi network' def",
"return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation): \"\"\" There are",
"running if self.iteration == 1: print \"\\n\" print \"Iteration %d",
"This Source Code Form is subject to the terms of",
"from marionette import MarionetteTestCase from marionette.by import By from marionette.errors",
"discoverable mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def",
"progress=None): if not destination.count('.') > 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]])",
"until the \\ return value is not False.\"\"\" end_time =",
"% remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2)",
"% avg_rss) # Close the summary file summary_file.close() # Write",
"output_str) def close_app(self): # Close the current app (self.app) by",
"self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self):",
"if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path,",
"network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def",
"data into .json output self.process_checkpoint_data() def checkpoint(self): # Console output",
"back to English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off spanish keyboard",
"frame=None, src=None): self.frame = frame self.frame_id = frame self.src =",
"float(timeout) + time.time() e = None while time.time() < timeout:",
"phone can be passed in: portrait-primary(which is the default orientation),",
"GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is None: raise Exception(\"App",
"with the driver as an argument until the \\ return",
"};\"\"\", script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property",
"spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do not",
"def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return",
"= 10000 # deafult timeout in seconds for the wait_for",
"checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file checkpoint_file.close()",
"result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self):",
"GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self,",
"SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager type:",
"while (time.time() - start < timeout): if check(self.marionette.get_url()): return time.sleep(2)",
"pref, which is different from a Gaia setting.\"\"\" return self._set_pref('Bool',",
"close the gecko instance attached to marionette self.marionette.instance.close() elif self.is_android_build:",
"driver as an argument until the \\ return value is",
"self.iterations + 1): self.iteration = count self.marionette.log(\"%s iteration %d of",
"to app frame %s in time' % app_frame) class GaiaData(object):",
"= GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard",
"orientation states which the phone can be passed in: portrait-primary(which",
"reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any open",
"method provided with the driver as an argument until the",
"self.iterations)) # Print to console so can see what iteration",
"% self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\\n' % (self.test_method.__name__,",
"import ElementNotVisibleException from marionette.errors import TimeoutException from marionette.errors import StaleElementException",
"= network or self.testvars.get('wifi') assert network, 'No WiFi network provided'",
"to the terms of the Mozilla Public # License, v.",
"1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result,",
"@property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return",
"app frame %s in time' % app_frame) class GaiaData(object): def",
"progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if",
"WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property",
"MPL was not distributed with this # file, You can",
"one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online: try:",
"bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result,",
"def setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart: pass self.device",
"bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def",
"url) return app @property def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return",
"apps self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator",
"is a fully qualified path self.device.manager.removeFile(filename) # Switch off keyboard",
"try: return self.marionette.find_element(by, locator) except NoSuchElementException: pass else: raise TimeoutException(",
"disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable",
"None self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args,",
"= os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" %",
"pref to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data connection",
"we're on while test is running if self.iteration == 1:",
"setting with name '%s' to '%s'\" % (name, value) def",
"data from %s\" % self.log_name) # Open the checkpoint file",
"os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked')",
"revert device to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() #",
"=== arguments[1]) { marionetteScriptFinished(); } else { var expected =",
"= self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for filename",
"value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name, value), special_powers=True)",
"= 60000 _search_timeout = 10000 # deafult timeout in seconds",
"text file summary_name = self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w')",
"start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent)",
"import time from marionette import MarionetteTestCase from marionette.by import By",
"= 0 for b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value) avg_rss",
"except NoSuchElementException: pass else: raise TimeoutException( 'Element %s not present",
"for each checkpoint b2g_rss_list = [] for next_line in checkpoint_file:",
"%d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' %",
"self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert",
"= self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True) assert result,",
"if not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined')",
"self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO add this to the system",
"except NoSuchElementException: break else: raise TimeoutException( 'Element %s still present",
"and focused in order for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def",
"# Write to suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' %",
"= self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value), special_powers=True) assert result,",
"self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network() except:",
"filename is a fully qualified path self.device.manager.removeFile(filename) # Switch off",
"if self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars) if self.restart and",
"in range(1, count + 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i",
"return value is not False.\"\"\" end_time = time.time() + timeout",
"self._set_pref('Char', name, value) def set_volume(self, value): channels = ['alarm', 'content',",
"the method provided with the driver as an argument until",
"= '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\\n'",
"+ 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection",
"marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\"))",
"'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" %",
"def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return",
"device has a network connection established (cell data, wifi, etc)",
"# Returns the state of only the currently active call",
"return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def",
"device's bluetooth name to %s\" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable):",
"return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property",
"Gecko boolean pref, which is different from a Gaia setting.\"\"\"",
"[self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()] # unlock",
"bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return",
"def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None, timeout=30):",
"name): \"\"\"Returns the value of a Gecko boolean pref, which",
"InvalidResponseException: if self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars) if self.restart",
"GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable to enable WiFi' def disable_wifi(self):",
"self.get_setting('*') def set_setting(self, name, value): import json value = json.dumps(value)",
"give the device some idle time\" % idle_time) time.sleep(idle_time) #",
"self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def",
"name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is",
"Gaia Endurance Test: %s\\n' % (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"])",
"\"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return self.marionette.execute_script('return",
"setting.\"\"\" return self._get_pref('Bool', name) def set_bool_pref(self, name, value): \"\"\"Sets the",
"% name, special_powers=True) @property def all_settings(self): return self.get_setting('*') def set_setting(self,",
"open(summary_name, 'w') # Write the summarized checkpoint data summary_file.write('test_name: %s\\n'",
"marionetteScriptFinished(); }, function() { return window.screen.mozOrientation === expected; } );",
"default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio,",
"push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self,",
"self.cur_time) with open(self.log_name, 'a') as log_file: log_file.write('%s Gaia Endurance Test:",
"in range(1, self.iterations + 1): self.iteration = count self.marionette.log(\"%s iteration",
"assert result, 'Unable to disable cell data' @property def is_cell_data_connected(self):",
"'Unable to lock screen' def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()')",
"event.\"); waitFor( function() { window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function()",
"%s not present before timeout' % locator) def wait_for_element_not_present(self, by,",
"all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' %",
"connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data()",
"the \\ return value is not False.\"\"\" end_time = time.time()",
"to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function",
"name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame()",
"attached to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise",
"def connect_to_wifi(self, network=None): network = network or self.testvars.get('wifi') assert network,",
"# Change language back to English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch",
"'of=%s' % remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g()",
"switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if not",
"setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart: pass self.device =",
"GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name):",
"self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette,",
"None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout = 10000 #",
"media if self.device.is_android_build: for filename in self.data_layer.media_files: # filename is",
"checkpoint data into .json self.marionette.log(\"processing checkpoint data from %s\" %",
"raise TimeoutException( 'Element %s not present before timeout' % locator)",
"kill any open apps self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0)",
"value in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() # If we",
"locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) +",
"try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to",
"Set do not track pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value',",
"see what iteration we're on while test is running if",
"def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def",
"import json number = json.dumps(number) message = json.dumps(message) result =",
"True except: return False def is_element_displayed(self, by, locator): try: return",
"self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock screen' def unlock(self): self.marionette.switch_to_frame()",
"self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to stop",
"this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony;",
"= None while time.time() < timeout: time.sleep(0.5) try: if self.marionette.find_element(by,",
"bluetooth name to %s\" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if",
"self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1",
"'Element %s still present after timeout' % locator) def wait_for_element_displayed(self,",
"testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back to",
"= arguments[1]; window.screen.onmozorientationchange = function(e) { console.log(\"Received 'onmozorientationchange' event.\"); waitFor(",
"result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock screen' def",
"log_file.write('%s Checkpoint after iteration %d of %d:\\n' % (self.cur_time, self.iteration,",
"message = json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number,",
"!= -1 || aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished();",
"GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self):",
"disable sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button')",
"the summary file summary_file.close() # Write to suite summary file",
"call or None if no active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\")",
"\"\"\"Calls the method provided with the driver as an argument",
"'#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two)",
"hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi",
"self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard =",
"of %d:\\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' % output_str) def",
"= 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self): #",
"value): \"\"\"Sets the value of a Gecko string pref, which",
"= self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result, \"Failed to kill app",
"def connect_to_network(self): if not self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection:",
"timeout' % locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout =",
"time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first checkpoint, create the file if",
"result, 'Unable to connect to cell data' def disable_cell_data(self): self.marionette.switch_to_frame()",
"time.time() < timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator) except NoSuchElementException:",
"sys import time from marionette import MarionetteTestCase from marionette.by import",
"= testvars or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\"))",
"self.marionette.instance: # launch the gecko instance attached to marionette self.marionette.instance.start()",
"self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name)",
"self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back to English",
"< end_time: try: value = method(self.marionette) if value: return value",
"suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file =",
"try: if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as e:",
"{ marionetteScriptFinished(); } else { var expected = arguments[1]; window.screen.onmozorientationchange",
"available for devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type ==",
"pref, which is different from a Gaia setting.\"\"\" return self._get_pref('Char',",
"mk['manifest']) # TODO add this to the system app object",
"special_powers=True) assert result, 'Unable to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame()",
"except StaleElementException: pass except NoSuchElementException: break else: raise TimeoutException( 'Element",
"os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time)",
"def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result,",
"self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True) def _set_pref(self, datatype, name,",
"or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test, app): self.test_method",
"window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) +",
"self.marionette.log(\"sleeping %d seconds to give the device some idle time\"",
"the driver as an argument until the \\ return value",
"self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\") @property def",
"summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n'",
"@property def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self):",
"data' def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert",
"self.iterations: self.checkpoint() # Finished, now process checkpoint data into .json",
"self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer =",
"raise TimeoutException('Could not switch to app frame %s in time'",
"return self._get_pref('Bool', name) def set_bool_pref(self, name, value): \"\"\"Sets the value",
"checkpoint_file = open(self.log_name, 'r') # Grab every b2g rss reading",
"is_element_displayed(self, by, locator): try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException):",
"the checkpoint file checkpoint_file = open(self.log_name, 'r') # Grab every",
"return self._manager @property def is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build']",
"= '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s'",
"self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss:",
"string pref, which is different from a Gaia setting.\"\"\" return",
"_default_timeout = 30 def __init__(self, *args, **kwargs): self.restart = kwargs.pop('restart',",
"time.time() < timeout: time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed(): break",
"name, value): \"\"\"Sets the value of a Gecko string pref,",
"((count % self.checkpoint_interval) == 0) or count == self.iterations: self.checkpoint()",
"locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False def tearDown(self): self.lockscreen =",
"\"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number)",
"to '%s'\" % (name, value) def _get_pref(self, datatype, name): return",
"mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager type: %s' % dm_type)",
"return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script( 'return",
"== self.iterations: self.checkpoint() # Finished, now process checkpoint data into",
"get_int_pref(self, name): \"\"\"Returns the value of a Gecko integer pref,",
"log needs to be open and focused in order for",
"+ 'return mobileConnection !== undefined') @property def has_wifi(self): if not",
"arguments[1]; window.screen.onmozorientationchange = function(e) { console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function()",
"kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js)",
"= self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w') # Write the",
"which is different from a Gaia setting.\"\"\" return self._get_pref('Int', name)",
"self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self,",
"assert self.device.is_online else: raise Exception('Unable to connect to local area",
"Grab every b2g rss reading for each checkpoint b2g_rss_list =",
"LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app",
"%s\" % self.log_name) # Open the checkpoint file checkpoint_file =",
"def send_sms(self, number, message): import json number = json.dumps(number) message",
"timeout=_default_timeout): timeout = float(timeout) + time.time() e = None while",
"result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def",
"log_file.write('%s Gaia Endurance Test: %s\\n' % (self.cur_time, self.test_method.__name__)) output_str =",
"a copy of the MPL was not distributed with this",
"for name, value in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() #",
"different from a Gaia setting.\"\"\" return self._get_pref('Char', name) def set_char_pref(self,",
"self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill",
"NoSuchElementException): raise TimeoutException('Element %s not present before timeout' % locator)",
"if self.iteration == 1: print \"\\n\" print \"Iteration %d of",
"frame %s in time' % app_frame) class GaiaData(object): def __init__(self,",
"return self.get_setting('*') def set_setting(self, name, value): import json value =",
"network=None): network = network or self.testvars.get('wifi') assert network, 'No WiFi",
"filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self, filename):",
"TimeoutException('Element %s not present before timeout' % locator) else: raise",
"different from a Gaia setting.\"\"\" return self._set_pref('Char', name, value) def",
"def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation):",
"name, value): import json value = json.dumps(value) result = self.marionette.execute_async_script('return",
"except: return False def is_element_displayed(self, by, locator): try: return self.marionette.find_element(by,",
"'%s' to '%s'\" % (name, value) def _get_pref(self, datatype, name):",
"return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def",
"self._get_pref('Bool', name) def set_bool_pref(self, name, value): \"\"\"Sets the value of",
"to recipient %s with text %s' % (number, message) class",
"@property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): #",
"self.test_method = test self.app_under_test = app # Now drive the",
"current app (self.app) by using the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new",
"setUp if not self.restart: # disable passcode before restore settings",
"summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n'",
"name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name,",
"self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap()",
"network, 'No WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" %",
"self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings from testvars [self.data_layer.set_setting(name, value) for",
"device some idle time\" % idle_time) time.sleep(idle_time) # Dump out",
"a summary text file summary_name = self.log_name.replace('.log', '_summary.log') summary_file =",
"loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1)",
"return app @property def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();')",
"@property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return",
"enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self):",
"permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name, permission_name,",
"self.checkpoint() # Finished, now process checkpoint data into .json output",
"while test is running if self.iteration == 1: print \"\\n\"",
"set_char_pref(self, name, value): \"\"\"Sets the value of a Gecko string",
"is not False.\"\"\" end_time = time.time() + timeout while time.time()",
"% device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name):",
"not track pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if",
"self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager type: %s'",
"or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs)",
"push_file(self, source, count=1, destination='', progress=None): if not destination.count('.') > 0:",
"# this is used to keep all tests passing while",
"There are 4 orientation states which the phone can be",
"app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result",
"time? if ((count % self.checkpoint_interval) == 0) or count ==",
"channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self):",
"pass except NoSuchElementException: break else: raise TimeoutException( 'Element %s still",
"bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return",
"' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return",
"self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True)",
"a fully qualified path self.device.manager.removeFile(filename) # Switch off keyboard FTU",
"install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO",
"when Bug 924912 is addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance:",
"os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert",
"disable passcode before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled',",
"class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations') or",
"is_wifi_connected(self, network=None): network = network or self.testvars.get('wifi') assert network, 'No",
"terms of the Mozilla Public # License, v. 2.0. If",
"= name self.origin = origin def __eq__(self, other): return self.__dict__",
"'%s'\" % app.name def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir,",
"# deafult timeout in seconds for the wait_for methods _default_timeout",
"def set_char_pref(self, name, value): \"\"\"Sets the value of a Gecko",
"result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return",
"time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass",
"for devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb':",
"%s);\" % (datatype, name, value), special_powers=True) def get_bool_pref(self, name): \"\"\"Returns",
"return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self):",
"app with name '%s'\" % name app = GaiaApp(frame=result.get('frame'), src=result.get('src'),",
"self.lockscreen.unlock() # If we are restarting all of these values",
"+ 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;')",
"self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args,",
"result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result, \"Failed to kill",
"i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy]) if",
"a Gaia setting.\"\"\" return self._set_pref('Int', name, value) def get_char_pref(self, name):",
"switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" %",
"marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to",
"wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while",
"(NoSuchElementException, StaleElementException) as e: pass else: # This is an",
"special_powers=True) assert result, 'Unable to send SMS to recipient %s",
"wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while",
"+ time.time() e = None while time.time() < timeout: time.sleep(0.5)",
"self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call log needs to",
"get_char_pref(self, name): \"\"\"Returns the value of a Gecko string pref,",
"host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device",
"+ 1): self.iteration = count self.marionette.log(\"%s iteration %d of %d\"",
"raise Exception(\"App failed to launch; there is no app frame\")",
"with open(self.log_name, 'a') as log_file: log_file.write('%s Gaia Endurance Test: %s\\n'",
"checkpoint(self): # Console output so know what's happening if watching",
"idle time (for gc) idle_time = 30 self.marionette.log(\"sleeping %d seconds",
"def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\")",
"the current app (self.app) by using the home button self.marionette.switch_to_frame()",
"one at http://mozilla.org/MPL/2.0/. import json import os import sys import",
"MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try: MarionetteTestCase.setUp(self) except InvalidResponseException: if",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable to",
"def active_telephony_state(self): # Returns the state of only the currently",
"if dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type ==",
"kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args,",
"for channel in channels: self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self):",
"locator) except NoSuchElementException: pass else: raise TimeoutException( 'Element %s not",
"% (app_name, permission_name, value)) def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None):",
"Console output so know what's happening if watching console print",
"self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars)",
"slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette) self.data_layer",
"return self.testvars['is_android_build'] @property def is_online(self): # Returns true if the",
"value), special_powers=True) assert result, \"Unable to change setting with name",
"result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable to connect",
"exist already if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\"",
"time.sleep(2) raise TimeoutException('Could not switch to app frame %s in",
"Create a summary text file summary_name = self.log_name.replace('.log', '_summary.log') summary_file",
"view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep",
"install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk = {\"name\": \"Marketplace Dev\",",
"earlier in the setUp if not self.restart: # disable passcode",
"# the emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen",
"self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame()",
"self.app_under_test = app # Now drive the actual test case",
"% self.checkpoint_interval) == 0) or count == self.iterations: self.checkpoint() #",
"len(b2g_rss_list) # Create a summary text file summary_name = self.log_name.replace('.log',",
"% (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' % output_str) def close_app(self): #",
"\"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result, \"Failed",
"time.sleep(0.5) try: return self.marionette.find_element(by, locator) except NoSuchElementException: pass else: raise",
"raise Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window",
"iterations for count in range(1, self.iterations + 1): self.iteration =",
"Process checkpoint data into .json self.marionette.log(\"processing checkpoint data from %s\"",
"home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any open apps self.apps.kill_all()",
"= method(self.marionette) if value: return value except (NoSuchElementException, StaleElementException): pass",
"time.time() while time.time() < timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator)",
"window.navigator.onLine;') @property def has_mobile_connection(self): # XXX: check bug-926169 # this",
"% (number, message) class GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette",
"False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self):",
"in (0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path,",
"else: # This is an effortless way to give extra",
"close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint data",
"__init__(self, *args, **kwargs): self.restart = kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval',",
"pref, which is different from a Gaia setting.\"\"\" return self._get_pref('Int',",
"= marionette self.testvars = testvars or {} js = os.path.abspath(os.path.join(__file__,",
"return self._has_wifi def push_file(self, source, count=1, destination='', progress=None): if not",
"introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || '",
"self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame()",
"addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance: # close the gecko",
"what's happening if watching console print \"Checkpoint...\" sys.stdout.flush() # Sleep",
"APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections",
"self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu')",
"(self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' % output_str) def close_app(self): # Close",
"+= int(b2g_mem_value) avg_rss = total / len(b2g_rss_list) # Create a",
"the value of a Gecko string pref, which is different",
"'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property",
"= float(timeout) + time.time() e = None while time.time() <",
"launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert result,",
"kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\") @property",
"\"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if not self.apps.is_app_installed(mk['name']): #",
"b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value) avg_rss = total /",
"__init__(self, marionette, testvars=None): self.marionette = marionette self.testvars = testvars or",
"timed out\"): \"\"\"Calls the method provided with the driver as",
"still visible after timeout' % locator) def wait_for_condition(self, method, timeout=_default_timeout,",
"connection established (cell data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property",
"'onmozorientationchange' event.\"); waitFor( function() { window.screen.onmozorientationchange = null; marionetteScriptFinished(); },",
"return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True)",
"marionette.by import By from marionette.errors import NoSuchElementException from marionette.errors import",
"% json.dumps(contact), special_powers=True) assert result, 'Unable to insert contact %s'",
"be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps =",
"'/'.join(['sdcard', destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def",
"%d of %d\" % (self.test_method.__name__, count, self.iterations)) # Print to",
"idle time\" % idle_time) time.sleep(idle_time) # Dump out some memory",
"GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to",
"float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: if",
"in b2g_rss_list: total += int(b2g_mem_value) avg_rss = total / len(b2g_rss_list)",
"expected; } ); }; console.log(\"Changing orientation to '\" + arguments[1]",
"which is different from a Gaia setting.\"\"\" return self._set_pref('Int', name,",
"'Able to set the device bluetooth discoverable mode' @property def",
"disable cell data' @property def is_cell_data_connected(self): # XXX: check bug-926169",
"not self.is_android_build: raise Exception('Device manager is only available for devices.')",
"in order for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self): self.marionette.execute_script(\"var",
"@property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator,",
"% idle_time) time.sleep(idle_time) # Dump out some memory status info",
"process checkpoint data into .json output self.process_checkpoint_data() def checkpoint(self): #",
"json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self):",
"(self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert device to",
"dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type == 'sut':",
"self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove",
"settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language",
"is subject to the terms of the Mozilla Public #",
"range(1, self.iterations + 1): self.iteration = count self.marionette.log(\"%s iteration %d",
"timeout' % locator) def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed out\"):",
"to local area network') def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename),",
"self.name = name self.origin = origin def __eq__(self, other): return",
"(name, value), special_powers=True) assert result, \"Unable to change setting with",
"= self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock screen' def unlock(self):",
"Public # License, v. 2.0. If a copy of the",
"% name) def kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir,",
"\", } if not self.apps.is_app_installed(mk['name']): # install the marketplace dev",
"except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect to",
"else: def check(now): return url in now while (time.time() -",
"924912 is addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance: # close",
"GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars =",
"% (datatype, name), special_powers=True) def _set_pref(self, datatype, name, value): value",
"case iterations for count in range(1, self.iterations + 1): self.iteration",
"GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars =",
"an argument until the \\ return value is not False.\"\"\"",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result",
"from marionette.errors import TimeoutException from marionette.errors import StaleElementException from marionette.errors",
"if not self.apps.is_app_installed(mk['name']): # install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")'",
"to network') assert self.device.is_online def connect_to_local_area_network(self): if not self.device.is_online: if",
"if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not present before timeout'",
"# revert device to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g()",
"count in range(1, self.iterations + 1): self.iteration = count self.marionette.log(\"%s",
"def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result,",
"GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, \"Unable to set device's bluetooth",
"gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data",
"while time.time() < end_time: try: value = method(self.marionette) if value:",
"network or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.marionette.switch_to_frame()",
"if not self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else:",
"of the MPL was not distributed with this # file,",
"really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen = LockScreen(self.marionette) self.apps = GaiaApps(self.marionette)",
"self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\",",
"marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def",
"what iteration we're on while test is running if self.iteration",
"Returns true if the device has a network connection established",
"but not displayed before timeout' % locator) def wait_for_element_not_displayed(self, by,",
"provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def known_networks(self):",
"devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb': self._manager",
"fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result = []",
"js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self):",
"%d\\n\\n' % avg_rss) # Close the summary file summary_file.close() #",
"% (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as log_file: log_file.write('%s",
"_cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a",
"result, \"Unable to change setting with name '%s' to '%s'\"",
"return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns the state",
"all of these values are reset to default earlier in",
"None self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout",
"Sleep to give device idle time (for gc) idle_time =",
"raise TimeoutException( 'Element %s still visible after timeout' % locator)",
"self.marionette.find_element(by, locator) return True except: return False def is_element_displayed(self, by,",
"None self.apps = None self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase):",
"if self.marionette.instance: # launch the gecko instance attached to marionette",
"result, 'Unable to disable cell data' @property def is_cell_data_connected(self): #",
"open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint after iteration %d of",
"WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert",
"self.manager.pushFile(source, destination) if count > 1: for i in range(1,",
"checkpoint, create the file if it doesn't exist already if",
"selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process",
"to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator",
"def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self):",
"raise Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\"",
"video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files =",
"app_name, permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name,",
"* len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable",
"raise Exception('Unable to connect to local area network') def push_resource(self,",
"def process_checkpoint_data(self): # Process checkpoint data into .json self.marionette.log(\"processing checkpoint",
"can see what iteration we're on while test is running",
"to connect to cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result =",
"'Unable to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return",
"import json value = json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)'",
"if len(extension): return [filename for filename in files if filename.endswith(extension)]",
"= test self.app_under_test = app # Now drive the actual",
"Tap the close icon for the current app locator_part_two =",
"filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation): \"\"\" There",
"MarionetteTestCase.setUp(self) except InvalidResponseException: if self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars)",
"after iteration %d of %d:\\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n'",
"value except (NoSuchElementException, StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message) def",
"def __eq__(self, other): return self.__dict__ == other.__dict__ class GaiaApps(object): def",
"time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException: break",
"hasattr(self, '_manager') and self._manager: return self._manager if not self.is_android_build: raise",
"so know what's happening if watching console print \"Checkpoint...\" sys.stdout.flush()",
"self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' %",
"remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination,",
"do not track pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1')",
"# kill any open apps self.apps.kill_all() # disable sound completely",
"import json import os import sys import time from marionette",
"locator) return True except: return False def is_element_displayed(self, by, locator):",
"value) def _get_pref(self, datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype,",
"< timeout: time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed(): break except",
"switch to app frame %s in time' % app_frame) class",
"self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable to connect to local",
"of only the currently active call or None if no",
"orientation): \"\"\" There are 4 orientation states which the phone",
"not self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise",
"destination) if count > 1: for i in range(1, count",
"else: raise Exception('Unable to connect to local area network') def",
"# If we are restarting all of these values are",
"avg_rss = total / len(b2g_rss_list) # Create a summary text",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return",
"self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any open apps self.apps.kill_all() # disable",
"reading for each checkpoint b2g_rss_list = [] for next_line in",
"keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings from testvars",
"self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name, value), special_powers=True) def get_bool_pref(self, name):",
"GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property def all_settings(self): return self.get_setting('*') def",
"self.restart: pass self.device = GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build",
"GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True) assert result, 'Unable to",
"+ time.time() while time.time() < timeout: time.sleep(0.5) try: return self.marionette.find_element(by,",
"def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time()",
"def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert",
"os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return",
"if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') !=",
"timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if not url: def check(now):",
"TODO add this to the system app object when we",
"' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled',",
"\"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as log_file:",
"GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name)) def set_permission(self, app_name, permission_name, value):",
"window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result",
"self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def known_networks(self): return",
"is_element_present(self, by, locator): try: self.marionette.find_element(by, locator) return True except: return",
"undefined') @property def has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi =",
"return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property",
"if not host: raise Exception('Must specify host with SUT!') self._manager",
"# file, You can obtain one at http://mozilla.org/MPL/2.0/. import json",
"data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") #",
"def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to",
"is addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance: # close the",
"console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function() { window.screen.onmozorientationchange = null; marionetteScriptFinished();",
"obtain one at http://mozilla.org/MPL/2.0/. import json import os import sys",
"different from a Gaia setting.\"\"\" return self._get_pref('Bool', name) def set_bool_pref(self,",
"os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\")",
"'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def",
"dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO add this to",
"'Unable to insert contact %s' % contact def remove_all_contacts(self, default_script_timeout=60000):",
"('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): #",
"'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def",
"kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test, app):",
"Close the current app (self.app) by using the home button",
"if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not switch to app",
"import mozdevice class LockScreen(object): def __init__(self, marionette): self.marionette = marionette",
"from a Gaia setting.\"\"\" return self._get_pref('Int', name) def set_int_pref(self, name,",
"for b2g_mem_value in b2g_rss_list: total += int(b2g_mem_value) avg_rss = total",
"suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s:",
"2.0. If a copy of the MPL was not distributed",
"else: raise TimeoutException('Element %s present but not displayed before timeout'",
"Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window =",
"result, 'Unable to enable WiFi' def disable_wifi(self): self.marionette.switch_to_frame() result =",
"for the current app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card' %",
"return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property def all_settings(self): return",
"checkpoint file checkpoint_file = open(self.log_name, 'r') # Grab every b2g",
"app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if not url:",
"def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The",
"self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert result, \"Failed to launch app",
"actual test case iterations for count in range(1, self.iterations +",
"'r') # Grab every b2g rss reading for each checkpoint",
"self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result, \"Failed to kill app with",
"time\" % idle_time) time.sleep(idle_time) # Dump out some memory status",
"the close icon for the current app locator_part_two = '#cards-view",
"device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);')",
"assert result, 'Unable to connect to WiFi network' def forget_all_networks(self):",
"def connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi()",
"False def tearDown(self): self.lockscreen = None self.apps = None self.data_layer",
"(datatype, name, value), special_powers=True) def get_bool_pref(self, name): \"\"\"Returns the value",
"result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, \"Unable to",
"self.marionette.client.close() self.marionette.session = None self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout",
"mozdevice class LockScreen(object): def __init__(self, marionette): self.marionette = marionette js",
"Finished, now process checkpoint data into .json output self.process_checkpoint_data() def",
"= open(self.log_name, 'r') # Grab every b2g rss reading for",
"(count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time? if ((count %",
"present before timeout' % locator) else: raise TimeoutException('Element %s present",
"self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio, disable",
"False) # Re-set edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False)",
"import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove",
"object when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self):",
"marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to",
"set_int_pref(self, name, value): \"\"\"Sets the value of a Gecko integer",
"result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result, 'Unable",
"if not url: def check(now): return \"about:blank\" not in now",
"is an effortless way to give extra debugging information if",
"result, \"Failed to launch app with name '%s'\" % name",
"if self.device.is_android_build: for filename in self.data_layer.media_files: # filename is a",
"datatype, name, value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype,",
"set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self):",
"(number, message) class GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette =",
"if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() #",
"return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): # XXX: check bug-926169",
"value = method(self.marionette) if value: return value except (NoSuchElementException, StaleElementException):",
"self.lockscreen = None self.apps = None self.data_layer = None MarionetteTestCase.tearDown(self)",
"\"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self,",
"def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result =",
"special_powers=True) @property def all_settings(self): return self.get_setting('*') def set_setting(self, name, value):",
"GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations') or 1",
"'notification'] for channel in channels: self.set_setting('audio.volume.%s' % channel, value) def",
"# reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any",
"result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable to disable",
"datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True) def",
"self.apps = None self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def",
"self.marionette = marionette self.testvars = testvars or {} @property def",
"StaleElementException) as e: pass else: # This is an effortless",
"\"Unable to change setting with name '%s' to '%s'\" %",
"cards view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) #",
"self.checkpoint_interval) == 0) or count == self.iterations: self.checkpoint() # Finished,",
"self.testvars.get('wifi') assert network, 'No WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return",
"os.environ.get('TEST_DEVICE') if not host: raise Exception('Must specify host with SUT!')",
"name) def set_char_pref(self, name, value): \"\"\"Sets the value of a",
"'_manager') and self._manager: return self._manager if not self.is_android_build: raise Exception('Device",
"def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network =",
"memory status info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If",
"time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException: break else: raise TimeoutException(",
"self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources',",
"the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up the",
"default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();',",
"try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False def",
"GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout = 10000 # deafult timeout",
"'1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back to English self.data_layer.set_setting(\"language.current\",",
"); }; console.log(\"Changing orientation to '\" + arguments[1] + \"'.\");",
"(time.time() - start < timeout): if check(self.marionette.get_url()): return time.sleep(2) raise",
"to default earlier in the setUp if not self.restart: #",
"except NoSuchElementException: break else: raise TimeoutException( 'Element %s still visible",
"mozdevice.DeviceManagerADB() elif dm_type == 'sut': host = os.environ.get('TEST_DEVICE') if not",
"orientation]) @property def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self):",
"if self.device.is_android_build: # revert device to a clean state self.device.manager.removeDir('/data/local/storage/persistent')",
"to English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off spanish keyboard before",
"self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled',",
"is_cell_data_connected(self): # XXX: check bug-926169 # this is used to",
"lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to lock",
"count self.marionette.log(\"%s iteration %d of %d\" % (self.test_method.__name__, count, self.iterations))",
"def __init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir,",
"tests passing while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection =",
"app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO add this to the",
"sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk",
"@property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result",
"function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') !=",
"%s' % (number, message) class GaiaDevice(object): def __init__(self, marionette, testvars=None):",
"set_setting(self, name, value): import json value = json.dumps(value) result =",
"message), special_powers=True) assert result, 'Unable to send SMS to recipient",
"Gecko string pref, which is different from a Gaia setting.\"\"\"",
"None: raise Exception(\"App failed to launch; there is no app",
"argument until the \\ return value is not False.\"\"\" end_time",
"self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result, 'Unable to insert",
"class GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars",
"self.origin = origin def __eq__(self, other): return self.__dict__ == other.__dict__",
"pref, which is different from a Gaia setting.\"\"\" return self._get_pref('Bool',",
"{ window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000) # TODO: Remove",
"track pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'):",
"# Now drive the actual test case iterations for count",
"setting.\"\"\" return self._get_pref('Int', name) def set_int_pref(self, name, value): \"\"\"Sets the",
"%d...\" % (count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time? if",
"= kwargs.pop('restart', False) kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs)",
"the value of a Gecko integer pref, which is different",
"# If first checkpoint, create the file if it doesn't",
"now else: def check(now): return url in now while (time.time()",
"# Checkpoint time? if ((count % self.checkpoint_interval) == 0) or",
"'%s')\" % (app_name, permission_name, value)) def launch(self, name, switch_to_frame=True, url=None,",
"'w') # Write the summarized checkpoint data summary_file.write('test_name: %s\\n' %",
"self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\" % app.origin) assert result, \"Failed to",
"media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def",
"\"\"\"Returns the value of a Gecko integer pref, which is",
"if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return",
"self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True)",
"else: raise TimeoutException( 'Element %s not present before timeout' %",
"to disable cell data' @property def is_cell_data_connected(self): # XXX: check",
"add this to the system app object when we have",
"= ['alarm', 'content', 'notification'] for channel in channels: self.set_setting('audio.volume.%s' %",
"== 'sut': host = os.environ.get('TEST_DEVICE') if not host: raise Exception('Must",
"method, timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls the method provided with",
"set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" %",
"% output_str) def close_app(self): # Close the current app (self.app)",
"'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !==",
"self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js)",
"assert result, 'Unable to disable WiFi' def connect_to_wifi(self, network=None): network",
"not host: raise Exception('Must specify host with SUT!') self._manager =",
"Write the summarized checkpoint data summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed:",
"= Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove media if self.device.is_android_build:",
"unlock screen' class GaiaApp(object): def __init__(self, origin=None, name=None, frame=None, src=None):",
"% self.log_name) # Open the checkpoint file checkpoint_file = open(self.log_name,",
"we are restarting all of these values are reset to",
"% contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts)))",
"permission_name)) def set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s',",
"from a Gaia setting.\"\"\" return self._set_pref('Int', name, value) def get_char_pref(self,",
"# Set do not track pref back to the default",
"self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def",
"self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name)) def set_permission(self, app_name, permission_name,",
"summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) # Close the summary file summary_file.close()",
"to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any open apps",
"TimeoutException from marionette.errors import StaleElementException from marionette.errors import InvalidResponseException import",
"def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files",
"self.apps = GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import",
"= origin def __eq__(self, other): return self.__dict__ == other.__dict__ class",
"False) @property def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame()",
"self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up the cards view _cards_view_locator",
"self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5) # Tap",
"Exception(\"App failed to launch; there is no app frame\") if",
"a Gecko integer pref, which is different from a Gaia",
"@property def active_telephony_state(self): # Returns the state of only the",
"from marionette.errors import InvalidResponseException import mozdevice class LockScreen(object): def __init__(self,",
"GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame)",
"= self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable to disable WiFi'",
"time.time() < timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break except",
"(name, value) def _get_pref(self, datatype, name): return self.marionette.execute_script(\"return SpecialPowers.get%sPref('%s');\" %",
"self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back to English self.data_layer.set_setting(\"language.current\", \"en-US\")",
"screen' def unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable",
"marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self,",
"self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count > 1: for i in",
"Gaia setting.\"\"\" return self._get_pref('Int', name) def set_int_pref(self, name, value): \"\"\"Sets",
"return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script( 'return",
"any open apps self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0) def",
"self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\")",
"GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name, permission_name, value)) def launch(self, name,",
"insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True)",
"return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files = self.marionette.execute_async_script(",
"expected = arguments[1]; window.screen.onmozorientationchange = function(e) { console.log(\"Received 'onmozorientationchange' event.\");",
"this is used to keep all tests passing while introducing",
"delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call",
"app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property def",
"present after timeout' % locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout):",
"os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB() elif",
"return self._set_pref('Int', name, value) def get_char_pref(self, name): \"\"\"Returns the value",
"time.sleep(5) # Tap the close icon for the current app",
"switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property def displayed_app(self): self.marionette.switch_to_frame() result",
"(app_name, permission_name, value)) def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame()",
"enable the device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) #",
"iteration %d of %d:\\n' % (self.cur_time, self.iteration, self.iterations)) log_file.write('%s\\n' %",
"li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator = ('css selector', locator_part_two) close_card_app_button",
"the phone can be passed in: portrait-primary(which is the default",
"device to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the",
"self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' % (number, message), special_powers=True) assert result, 'Unable",
"float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try: return",
"telephony = window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self):",
"def kill_all(self): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js)",
"self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None,",
"= app # Now drive the actual test case iterations",
"') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) # Close the",
"self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1]) { marionetteScriptFinished(); } else {",
"(self.app) by using the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") #",
"= self.marionette.execute_async_script('return GaiaApps.displayedApp();') return GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self):",
"raise TimeoutException('Element %s not present before timeout' % locator) else:",
"self.checkpoint_path = \"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name =",
"remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")'",
"to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return",
"' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !== undefined') @property",
"window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self):",
"status info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first",
"self._set_pref('Int', name, value) def get_char_pref(self, name): \"\"\"Returns the value of",
"average b2g_rss total = 0 for b2g_mem_value in b2g_rss_list: total",
"= null; marionetteScriptFinished(); }, function() { return window.screen.mozOrientation === expected;",
"self.__dict__ == other.__dict__ class GaiaApps(object): def __init__(self, marionette): self.marionette =",
"def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout):",
"change_orientation(self, orientation): \"\"\" There are 4 orientation states which the",
"each checkpoint b2g_rss_list = [] for next_line in checkpoint_file: if",
"data summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test:",
"GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set the device bluetooth discoverable",
"app object when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def",
"return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start",
"# Open the checkpoint file checkpoint_file = open(self.log_name, 'r') #",
"'%s'\" % (name, value) def _get_pref(self, datatype, name): return self.marionette.execute_script(\"return",
"set_bool_pref(self, name, value): \"\"\"Sets the value of a Gecko boolean",
"check(now): return \"about:blank\" not in now else: def check(now): return",
"== 'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type == 'sut': host",
"self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time? if ((count % self.checkpoint_interval)",
"'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection !== undefined') @property def has_wifi(self):",
"= self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, \"Unable to set",
"def kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\"))",
"self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable to connect to cell",
"Gaia setting.\"\"\" return self._set_pref('Int', name, value) def get_char_pref(self, name): \"\"\"Returns",
"manager(self): if hasattr(self, '_manager') and self._manager: return self._manager if not",
"origin def __eq__(self, other): return self.__dict__ == other.__dict__ class GaiaApps(object):",
"+ \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return",
"self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();')",
"self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID, 'app-install-install-button') mk = {\"name\":",
"__init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval =",
"dm_type) return self._manager @property def is_android_build(self): if self.testvars.get('is_android_build') is None:",
"passing while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection",
"' + 'return mobileConnection !== undefined') @property def has_wifi(self): if",
"back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable",
"focused in order for this to work.\"\"\" self.marionette.execute_script('window.wrappedJSObject.RecentsDBManager.deleteAll();') def kill_active_call(self):",
"function() { window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function() { return",
"MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations')",
"self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network = network or self.testvars.get('wifi')",
"count, self.iterations)) # Print to console so can see what",
"extra debugging information if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not",
"Returns the state of only the currently active call or",
"# Calculate the average b2g_rss total = 0 for b2g_mem_value",
"locator) except NoSuchElementException: break else: raise TimeoutException( 'Element %s still",
"GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self,",
"'Unable to disable cell data' @property def is_cell_data_connected(self): # XXX:",
"name, value), special_powers=True) def get_bool_pref(self, name): \"\"\"Returns the value of",
"TimeoutException(message) def is_element_present(self, by, locator): try: self.marionette.find_element(by, locator) return True",
"= frame self.frame_id = frame self.src = src self.name =",
"-1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000) # TODO:",
"\"\"\"Returns the value of a Gecko boolean pref, which is",
"type: %s' % dm_type) return self._manager @property def is_android_build(self): if",
"NoSuchElementException: pass else: raise TimeoutException( 'Element %s not present before",
"js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result = self.marionette.execute_async_script(\"GaiaApps.kill('%s');\"",
"of the Mozilla Public # License, v. 2.0. If a",
"stop_b2g(self): if self.marionette.instance: # close the gecko instance attached to",
"launch app with name '%s'\" % name app = GaiaApp(frame=result.get('frame'),",
"the device has a network connection established (cell data, wifi,",
"'_summary.log') summary_file = open(summary_name, 'w') # Write the summarized checkpoint",
"if self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio, disable Airplane mode",
"= marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def",
"window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000) # TODO: Remove this",
"result, 'Able to set the device bluetooth discoverable mode' @property",
"json.dumps(contact), special_powers=True) assert result, 'Unable to insert contact %s' %",
"to connect to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()')",
"def is_element_present(self, by, locator): try: self.marionette.find_element(by, locator) return True except:",
"test self.app_under_test = app # Now drive the actual test",
"uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self, app): self.marionette.switch_to_frame()",
"name, value): \"\"\"Sets the value of a Gecko boolean pref,",
"carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi()",
"methods _default_timeout = 30 def __init__(self, *args, **kwargs): self.restart =",
"if it doesn't exist already if self.iteration in (0, self.checkpoint_interval):",
"special_powers=True) def get_bool_pref(self, name): \"\"\"Returns the value of a Gecko",
"there is no app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return",
"= 30 self.marionette.log(\"sleeping %d seconds to give the device some",
"Endurance Test: %s\\n' % (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with",
"if ((count % self.checkpoint_interval) == 0) or count == self.iterations:",
"raise Exception('Must specify host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else:",
"the Mozilla Public # License, v. 2.0. If a copy",
"effortless way to give extra debugging information if isinstance(e, NoSuchElementException):",
"self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def",
"as log_file: log_file.write('%s Checkpoint after iteration %d of %d:\\n' %",
"self.checkpoint_path suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\\n' % (self.test_method.__name__, avg_rss))",
"_script_timeout = 60000 _search_timeout = 10000 # deafult timeout in",
"self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a')",
"self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint data into .json",
"if self.marionette.instance: # close the gecko instance attached to marionette",
"b2g_rss total = 0 for b2g_mem_value in b2g_rss_list: total +=",
"before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do not track pref",
"__init__(self, origin=None, name=None, frame=None, src=None): self.frame = frame self.frame_id =",
"not self.restart: # disable passcode before restore settings from testvars",
"method(self.marionette) if value: return value except (NoSuchElementException, StaleElementException): pass time.sleep(0.5)",
"remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\")",
"debugging information if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not present",
"\"\"\" There are 4 orientation states which the phone can",
"= None self.apps = None self.data_layer = None MarionetteTestCase.tearDown(self) class",
"self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance:",
"self.marionette.instance: # close the gecko instance attached to marionette self.marionette.instance.close()",
"connect to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def",
"call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable')",
"% self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations)",
"output self.process_checkpoint_data() def checkpoint(self): # Console output so know what's",
"# Create a summary text file summary_name = self.log_name.replace('.log', '_summary.log')",
"name) def kill(self, app): self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms',",
"network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network",
"self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session",
"connect to local area network') def push_resource(self, filename, count=1, destination=''):",
"__eq__(self, other): return self.__dict__ == other.__dict__ class GaiaApps(object): def __init__(self,",
"or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame()",
"provided with the driver as an argument until the \\",
"device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, \"Unable",
"is running if self.iteration == 1: print \"\\n\" print \"Iteration",
"json.dumps(number) message = json.dumps(message) result = self.marionette.execute_async_script('return GaiaDataLayer.sendSMS(%s, %s)' %",
"name, value): \"\"\"Sets the value of a Gecko integer pref,",
"GaiaApps(object): def __init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__,",
"= frame self.src = src self.name = name self.origin =",
"('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5)",
"pass else: # This is an effortless way to give",
"marionette self.testvars = testvars or {} @property def manager(self): if",
"checkpoint b2g_rss_list = [] for next_line in checkpoint_file: if next_line.startswith(\"b2g\"):",
"% (app_name, permission_name)) def set_permission(self, app_name, permission_name, value): return self.marionette.execute_async_script(\"return",
"script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def",
"json import os import sys import time from marionette import",
"@property def has_mobile_connection(self): # XXX: check bug-926169 # this is",
"def has_mobile_connection(self): # XXX: check bug-926169 # this is used",
"locator).is_displayed(): break except StaleElementException: pass except NoSuchElementException: break else: raise",
"in seconds for the wait_for methods _default_timeout = 30 def",
"self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException, StaleElementException) as e: pass else:",
"to be open and focused in order for this to",
"# This Source Code Form is subject to the terms",
"def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else:",
"self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert",
"def __init__(self, *args, **kwargs): self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval",
"of these values are reset to default earlier in the",
"v. 2.0. If a copy of the MPL was not",
"end_time = time.time() + timeout while time.time() < end_time: try:",
"output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint",
"= self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable to enable WiFi'",
"# Console output so know what's happening if watching console",
"\"\"\"Sets the value of a Gecko integer pref, which is",
"name) def set_int_pref(self, name, value): \"\"\"Sets the value of a",
"@property def all_settings(self): return self.get_setting('*') def set_setting(self, name, value): import",
"remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g()",
"sleep when Bug 924912 is addressed time.sleep(5) def stop_b2g(self): if",
"self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert device to a clean",
"be passed in: portrait-primary(which is the default orientation), landscape-primary, portrait-secondary",
"= function(e) { console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function() { window.screen.onmozorientationchange",
"None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def",
"# Finished, now process checkpoint data into .json output self.process_checkpoint_data()",
"class LockScreen(object): def __init__(self, marionette): self.marionette = marionette js =",
"summary file summary_file.close() # Write to suite summary file suite_summary_file_name",
"'if=%s' % destination, 'of=%s' % remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination)",
"@property def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result",
"drive(self, test, app): self.test_method = test self.app_under_test = app #",
"every b2g rss reading for each checkpoint b2g_rss_list = []",
"only available for devices.') dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type",
"= None self.marionette.window = None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000",
"and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable to connect",
"into .json self.marionette.log(\"processing checkpoint data from %s\" % self.log_name) #",
"# Tap the close icon for the current app locator_part_two",
"marionette.errors import NoSuchElementException from marionette.errors import ElementNotVisibleException from marionette.errors import",
"testvars=None): self.marionette = marionette self.testvars = testvars or {} js",
"pref back to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): #",
"GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self,",
"message): import json number = json.dumps(number) message = json.dumps(message) result",
"assert result, 'Unable to send SMS to recipient %s with",
"start < timeout): if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not",
"'Unable to send SMS to recipient %s with text %s'",
"elif dm_type == 'sut': host = os.environ.get('TEST_DEVICE') if not host:",
"value) def set_volume(self, value): channels = ['alarm', 'content', 'notification'] for",
"B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window = None class GaiaTestCase(MarionetteTestCase):",
"network') def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination]))",
"self.device = GaiaDevice(self.marionette, self.testvars) if self.restart and (self.device.is_android_build or self.marionette.instance):",
"'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False)",
"def manager(self): if hasattr(self, '_manager') and self._manager: return self._manager if",
"if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) #",
"summary_file.close() # Write to suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log'",
"bug-926169 # this is used to keep all tests passing",
"failed to launch; there is no app frame\") if switch_to_frame:",
"in time' % app_frame) class GaiaData(object): def __init__(self, marionette, testvars=None):",
"app # Now drive the actual test case iterations for",
"for filename in files if filename.endswith(extension)] return files def send_sms(self,",
"Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove media if self.device.is_android_build: for",
"origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name): self.marionette.switch_to_frame() return",
"Gaia setting.\"\"\" return self._set_pref('Bool', name, value) def get_int_pref(self, name): \"\"\"Returns",
"try: value = method(self.marionette) if value: return value except (NoSuchElementException,",
"of a Gecko integer pref, which is different from a",
"value) for name, value in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock()",
"portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1]) {",
"\"Checkpoint...\" sys.stdout.flush() # Sleep to give device idle time (for",
"checkpoint file checkpoint_file.close() # Calculate the average b2g_rss total =",
"\"Iteration %d of %d...\" % (count, self.iterations) sys.stdout.flush() self.test_method() #",
"not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else:",
"qualified path self.device.manager.removeFile(filename) # Switch off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\",",
"# disable sound completely self.data_layer.set_volume(0) def install_marketplace(self): _yes_button_locator = (By.ID,",
"discoverable): if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result =",
"self.iteration == 1: print \"\\n\" print \"Iteration %d of %d...\"",
"return self._set_pref('Char', name, value) def set_volume(self, value): channels = ['alarm',",
"def is_cell_data_connected(self): # XXX: check bug-926169 # this is used",
"distributed with this # file, You can obtain one at",
"result, \"Unable to set device's bluetooth name to %s\" %",
"(NoSuchElementException, ElementNotVisibleException): return False def tearDown(self): self.lockscreen = None self.apps",
"to send SMS to recipient %s with text %s' %",
"\"Unable to set device's bluetooth name to %s\" % device_name",
"= '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count > 1:",
"os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame() return",
"# License, v. 2.0. If a copy of the MPL",
"of %d...\" % (count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint time?",
"self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network = network or",
"mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozBluetooth.enabled\") @property def is_cell_data_enabled(self):",
"# Sleep to give device idle time (for gc) idle_time",
"# Grab every b2g rss reading for each checkpoint b2g_rss_list",
"def is_locked(self): self.marionette.switch_to_frame() return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked') def lock(self): self.marionette.switch_to_frame() result =",
"@property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return",
"GaiaApps(self.marionette) self.data_layer = GaiaData(self.marionette, self.testvars) from gaiatest.apps.keyboard.app import Keyboard self.keyboard",
"SpecialPowers.get%sPref('%s');\" % (datatype, name), special_powers=True) def _set_pref(self, datatype, name, value):",
"'Element %s not present before timeout' % locator) def wait_for_element_not_present(self,",
"Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures pref to",
"from a Gaia setting.\"\"\" return self._get_pref('Char', name) def set_char_pref(self, name,",
"self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to start",
"def switch_to_frame(self, app_frame, url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if",
"print \"\\n\" print \"Iteration %d of %d...\" % (count, self.iterations)",
"= '#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower() _close_button_locator = ('css selector',",
"% name, script_timeout=launch_timeout) assert result, \"Failed to launch app with",
"XXX: check bug-926169 # this is used to keep all",
"undefined') return self._has_wifi def push_file(self, source, count=1, destination='', progress=None): if",
"{ console.log(\"Received 'onmozorientationchange' event.\"); waitFor( function() { window.screen.onmozorientationchange = null;",
"self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True)",
"self.device.manager.removeFile(filename) # Switch off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) #",
"is None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property",
"frame self.src = src self.name = name self.origin = origin",
"timeout): if check(self.marionette.get_url()): return time.sleep(2) raise TimeoutException('Could not switch to",
"= os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number):",
"app.origin) assert result, \"Failed to kill app with name '%s'\"",
"return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def media_files(self): result = [] result.extend(self.music_files)",
"portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary \"\"\"",
"else: raise Exception('Unknown device manager type: %s' % dm_type) return",
"self.data_layer.get_setting('ril.radio.disabled'): # enable the device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled',",
"time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self, by, locator): try: self.marionette.find_element(by,",
"%s still visible after timeout' % locator) def wait_for_condition(self, method,",
"WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" %",
"from marionette.errors import NoSuchElementException from marionette.errors import ElementNotVisibleException from marionette.errors",
"self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return",
"special_powers=True) def _set_pref(self, datatype, name, value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s',",
"If we are restarting all of these values are reset",
"def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name,",
"connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert",
"name, value in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() # If",
"self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property",
"start = time.time() if not url: def check(now): return \"about:blank\"",
"known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns the",
"or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000)",
"if count > 1: for i in range(1, count +",
"is different from a Gaia setting.\"\"\" return self._set_pref('Char', name, value)",
"timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time() < timeout:",
"start_b2g(self): if self.marionette.instance: # launch the gecko instance attached to",
"network provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network))",
"result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self):",
"this sleep when Bug 924912 is addressed time.sleep(5) def stop_b2g(self):",
"restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change",
"\"https://marketplace-dev.allizom.org/manifest.webapp \", } if not self.apps.is_app_installed(mk['name']): # install the marketplace",
"Exception('Unknown device manager type: %s' % dm_type) return self._manager @property",
"default earlier in the setUp if not self.restart: # disable",
"'/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count > 1: for",
"from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) # Change language back",
"self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise Exception('Unable to connect to",
"self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data",
"can be passed in: portrait-primary(which is the default orientation), landscape-primary,",
"tearDown(self): self.lockscreen = None self.apps = None self.data_layer = None",
"Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend',",
"class GaiaApps(object): def __init__(self, marionette): self.marionette = marionette js =",
"Bug 924912 is addressed time.sleep(5) def stop_b2g(self): if self.marionette.instance: #",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result, 'Unable",
"or {} @property def manager(self): if hasattr(self, '_manager') and self._manager:",
"1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s' %",
"self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable to enable WiFi' def",
"This is an effortless way to give extra debugging information",
"try: self.marionette.find_element(by, locator) return True except: return False def is_element_displayed(self,",
"os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME)",
"wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while",
"self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss:",
"|| aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); } });\"\"\",",
"'No WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network))",
"the actual test case iterations for count in range(1, self.iterations",
"open apps self.apps.kill_all() # disable sound completely self.data_layer.set_volume(0) def install_marketplace(self):",
"time.time() + timeout while time.time() < end_time: try: value =",
"extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename",
"self.data_layer.media_files: # filename is a fully qualified path self.device.manager.removeFile(filename) #",
"bit time.sleep(5) # Tap the close icon for the current",
"'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();')",
"self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi def push_file(self, source, count=1,",
"< timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break except (NoSuchElementException,",
"edge gestures pref to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier",
"no app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property",
"result, 'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name):",
"destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self, filename): return os.path.abspath(os.path.join(os.path.dirname(__file__),",
"null; marionetteScriptFinished(); }, function() { return window.screen.mozOrientation === expected; }",
"GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def",
"contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name,",
"%s)' % (name, value), special_powers=True) assert result, \"Unable to change",
"def is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self,",
"raise TimeoutException( 'Element %s still present after timeout' % locator)",
"value = json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name,",
"Source Code Form is subject to the terms of the",
"pref, which is different from a Gaia setting.\"\"\" return self._set_pref('Int',",
"% (number, message), special_powers=True) assert result, 'Unable to send SMS",
"'No WiFi network provided' self.enable_wifi() self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\"",
"self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session()",
"iteration %d of %d\" % (self.test_method.__name__, count, self.iterations)) # Print",
"to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable",
"self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js)",
"GaiaDataLayer.isWiFiConnected(%s)\" % json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property",
"has a network connection established (cell data, wifi, etc) return",
"self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return",
"def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self, contact):",
"close_app(self): # Close the current app (self.app) by using the",
"\"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\"",
"data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): #",
"by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() while time.time()",
"date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True)",
"value), special_powers=True) def get_bool_pref(self, name): \"\"\"Returns the value of a",
"self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property",
"check bug-926169 # this is used to keep all tests",
"the device bluetooth discoverable mode' @property def bluetooth_is_enabled(self): return self.marionette.execute_script(\"return",
"self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: # revert",
"self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns the state of",
"TimeoutException( 'Element %s still visible after timeout' % locator) def",
"TODO: Remove this sleep when Bug 924912 is addressed time.sleep(5)",
"self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval:",
"in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self): # Returns true",
"json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value), special_powers=True)",
"else: raise TimeoutException( 'Element %s still present after timeout' %",
"area network') def push_resource(self, filename, count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard',",
"with open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint after iteration %d",
"if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return",
"special_powers=True) assert result, 'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def",
"file checkpoint_file.close() # Calculate the average b2g_rss total = 0",
"mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property",
"cell data' @property def is_cell_data_connected(self): # XXX: check bug-926169 #",
"restarting all of these values are reset to default earlier",
"assert result, 'Unable to lock screen' def unlock(self): self.marionette.switch_to_frame() result",
"is different from a Gaia setting.\"\"\" return self._set_pref('Bool', name, value)",
"self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name",
"self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen",
"locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint",
"get_setting(self, name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property def",
"for the wait_for methods _default_timeout = 30 def __init__(self, *args,",
"are restarting all of these values are reset to default",
"info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first checkpoint,",
"False) # disable carrier data connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming()",
"bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return",
"different from a Gaia setting.\"\"\" return self._set_pref('Int', name, value) def",
"mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures pref to False",
"else: raise TimeoutException(message) def is_element_present(self, by, locator): try: self.marionette.find_element(by, locator)",
"return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name, permission_name, value)) def",
"except (NoSuchElementException, ElementNotVisibleException): return False def tearDown(self): self.lockscreen = None",
"% date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT) @property def all_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();',",
"src=result.get('src'), name=result.get('name'), origin=result.get('origin')) def switch_to_displayed_app(self): self.marionette.switch_to_default_content() self.marionette.switch_to_frame(self.displayed_app.frame) def is_app_installed(self, app_name):",
"self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self): # Returns true if",
"if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable to connect to network')",
"30 self.marionette.log(\"sleeping %d seconds to give the device some idle",
"landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1]) { marionetteScriptFinished(); }",
"Exception('Device manager is only available for devices.') dm_type = os.environ.get('DM_TRANS',",
"is different from a Gaia setting.\"\"\" return self._set_pref('Int', name, value)",
"this to the system app object when we have one",
"self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name) assert result, \"Unable to set device's",
"device idle time (for gc) idle_time = 30 self.marionette.log(\"sleeping %d",
"the emulator can be really slow! self.marionette.set_script_timeout(self._script_timeout) self.marionette.set_search_timeout(self._search_timeout) self.lockscreen =",
"be open and focused in order for this to work.\"\"\"",
"(cell data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self):",
"% locator) def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls",
"NoSuchElementException: break else: raise TimeoutException( 'Element %s still visible after",
"self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do not track pref back to",
"the device some idle time\" % idle_time) time.sleep(idle_time) # Dump",
"def has_wifi(self): if not hasattr(self, '_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager",
"checkpoint data from %s\" % self.log_name) # Open the checkpoint",
"the wait_for methods _default_timeout = 30 def __init__(self, *args, **kwargs):",
"self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up the cards view _cards_view_locator =",
"if no active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self):",
"attached to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise",
"self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self,",
"self.marionette.find_element(by, locator) except NoSuchElementException: break else: raise TimeoutException( 'Element %s",
"'Unable to connect to WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return",
"Gecko integer pref, which is different from a Gaia setting.\"\"\"",
"= self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi def push_file(self, source,",
"launch the gecko instance attached to marionette self.marionette.instance.start() elif self.is_android_build:",
"to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else: raise Exception('Unable",
"(NoSuchElementException, StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self, by,",
"total / len(b2g_rss_list) # Create a summary text file summary_name",
"name, script_timeout=launch_timeout) assert result, \"Failed to launch app with name",
"deafult timeout in seconds for the wait_for methods _default_timeout =",
"off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings from",
"while time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except NoSuchElementException:",
"screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout",
"self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations:",
"int(b2g_mem_value) avg_rss = total / len(b2g_rss_list) # Create a summary",
"return True except: return False def is_element_displayed(self, by, locator): try:",
"def get_bool_pref(self, name): \"\"\"Returns the value of a Gecko boolean",
"url in now while (time.time() - start < timeout): if",
"or count == self.iterations: self.checkpoint() # Finished, now process checkpoint",
"%d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(',",
"@property def media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result,",
"def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time()",
"def cleanUp(self): # remove media if self.device.is_android_build: for filename in",
"timeout' % locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout =",
"result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value), special_powers=True) assert",
"not self.apps.is_app_installed(mk['name']): # install the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' %",
"a bit time.sleep(5) # Tap the close icon for the",
"suite_summary_file = open(suite_summary_file_name, 'a') suite_summary_file.write('%s: %s\\n' % (self.test_method.__name__, avg_rss)) suite_summary_file.close()",
"(arguments[0] === arguments[1]) { marionetteScriptFinished(); } else { var expected",
"if self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build: #",
"number, message): import json number = json.dumps(number) message = json.dumps(message)",
"|| ' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; ' +",
"gecko instance attached to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g'])",
"self._manager = mozdevice.DeviceManagerADB() elif dm_type == 'sut': host = os.environ.get('TEST_DEVICE')",
"= self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint data into",
"close_card_app_button.tap() def process_checkpoint_data(self): # Process checkpoint data into .json self.marionette.log(\"processing",
"def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return",
"have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if not self.device.is_online:",
"in channels: self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame() return",
"a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can",
"assert result, 'Unable to remove all contacts' self.marionette.set_script_timeout(default_script_timeout) def get_setting(self,",
"to %s\" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result",
"restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance: # launch",
"the default orientation), landscape-primary, portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if",
"return url in now while (time.time() - start < timeout):",
"self.device.is_online def connect_to_local_area_network(self): if not self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi:",
"orientation to '\" + arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation,",
"self.frame_id = frame self.src = src self.name = name self.origin",
"locator): try: self.marionette.find_element(by, locator) return True except: return False def",
"def is_android_build(self): if self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android' in",
"a Gaia setting.\"\"\" return self._get_pref('Int', name) def set_int_pref(self, name, value):",
"script_timeout=launch_timeout) assert result, \"Failed to launch app with name '%s'\"",
"def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self, app):",
"self.marionette = marionette self.testvars = testvars or {} js =",
"a Gecko boolean pref, which is different from a Gaia",
"locator) def wait_for_element_not_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout) +",
"result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set the",
"== 0) or count == self.iterations: self.checkpoint() # Finished, now",
"self.src = src self.name = name self.origin = origin def",
"% device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result = self.marionette.execute_async_script('return",
"self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return",
"return value except (NoSuchElementException, StaleElementException): pass time.sleep(0.5) else: raise TimeoutException(message)",
"assert result, \"Unable to set device's bluetooth name to %s\"",
"value) def get_int_pref(self, name): \"\"\"Returns the value of a Gecko",
"name, value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name,",
"self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable to",
"self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs):",
"= self.marionette.execute_async_script('return GaiaDataLayer.insertContact(%s);' % json.dumps(contact), special_powers=True) assert result, 'Unable to",
"'Unable to unlock screen' class GaiaApp(object): def __init__(self, origin=None, name=None,",
"different from a Gaia setting.\"\"\" return self._set_pref('Bool', name, value) def",
"to launch; there is no app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id,",
"values are reset to default earlier in the setUp if",
"== other.__dict__ class GaiaApps(object): def __init__(self, marionette): self.marionette = marionette",
"Change language back to English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off",
"screen' class GaiaApp(object): def __init__(self, origin=None, name=None, frame=None, src=None): self.frame",
"\"\"\"Sets the value of a Gecko string pref, which is",
"Code Form is subject to the terms of the Mozilla",
"# restore settings from testvars [self.data_layer.set_setting(name, value) for name, value",
"with name '%s' to '%s'\" % (name, value) def _get_pref(self,",
"self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset",
"= window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self): return",
"time (for gc) idle_time = 30 self.marionette.log(\"sleeping %d seconds to",
"os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return",
"way to give extra debugging information if isinstance(e, NoSuchElementException): raise",
"stop B2G') self.marionette.client.close() self.marionette.session = None self.marionette.window = None class",
"self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self,",
"not present before timeout' % locator) def wait_for_element_not_present(self, by, locator,",
"output so know what's happening if watching console print \"Checkpoint...\"",
"= os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self): self.marionette.switch_to_frame()",
"def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance: #",
"passcode before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False)",
"is_antenna_available(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable') @property def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled')",
"console.log(\"Changing orientation to '\" + arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\",",
"Bring up the cards view _cards_view_locator = ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new",
"wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): # XXX:",
"def kill_active_call(self): self.marionette.execute_script(\"var telephony = window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\")",
"@property def is_cell_data_connected(self): # XXX: check bug-926169 # this is",
"specify host with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown",
"self.testvars) if self.restart and (self.device.is_android_build or self.marionette.instance): self.device.stop_b2g() if self.device.is_android_build:",
"self.process_checkpoint_data() def checkpoint(self): # Console output so know what's happening",
"result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock screen' class",
"enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert result, 'Unable",
"delete_all_call_log_entries(self): \"\"\"The call log needs to be open and focused",
"by, locator): try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return",
"= time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # If first checkpoint, create the file",
"total += int(b2g_mem_value) avg_rss = total / len(b2g_rss_list) # Create",
"self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);'",
"% i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' % remote_copy])",
"time.time() e = None while time.time() < timeout: time.sleep(0.5) try:",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\"",
"self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() # If we are restarting",
"except (NoSuchElementException, StaleElementException) as e: pass else: # This is",
"None if no active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def",
"if filename.endswith(extension)] return files def send_sms(self, number, message): import json",
"= self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint after",
"# Close the current app (self.app) by using the home",
"rss reading for each checkpoint b2g_rss_list = [] for next_line",
"Print to console so can see what iteration we're on",
"def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\"",
"self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able",
"0755) self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name,",
"\"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1]) { marionetteScriptFinished(); } else",
"b2g_rss_list: total += int(b2g_mem_value) avg_rss = total / len(b2g_rss_list) #",
"is_app_installed(self, app_name): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"GaiaApps.locateWithName('%s')\" % app_name) def uninstall(self, name):",
"% destination, 'of=%s' % remote_copy]) if progress: progress.update(i) self.manager.removeFile(destination) def",
"GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable to disable cell data' @property",
"files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for",
"json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" % (datatype, name, value), special_powers=True) def get_bool_pref(self,",
"return self._manager if not self.is_android_build: raise Exception('Device manager is only",
"time.sleep(5) def stop_b2g(self): if self.marionette.instance: # close the gecko instance",
"app (self.app) by using the home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\")",
"true if the device has a network connection established (cell",
"arguments[1]) { marionetteScriptFinished(); } else { var expected = arguments[1];",
"%s' % dm_type) return self._manager @property def is_android_build(self): if self.testvars.get('is_android_build')",
"connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable",
"for count in range(1, self.iterations + 1): self.iteration = count",
"files def send_sms(self, number, message): import json number = json.dumps(number)",
"self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable to disable WiFi' def",
"log_file: log_file.write('%s Gaia Endurance Test: %s\\n' % (self.cur_time, self.test_method.__name__)) output_str",
"FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings from testvars [self.data_layer.set_setting(name,",
"Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit time.sleep(5) # Tap the",
"self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for filename in",
"disable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def",
"self.marionette.import_script(js) self.marionette.set_search_timeout(10000) def set_time(self, date_number): self.marionette.set_context(self.marionette.CONTEXT_CHROME) self.marionette.execute_script(\"window.navigator.mozTime.set(%s);\" % date_number) self.marionette.set_context(self.marionette.CONTEXT_CONTENT)",
"'b2g']) else: raise Exception('Unable to stop B2G') self.marionette.client.close() self.marionette.session =",
"self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property def all_settings(self): return self.get_setting('*')",
"device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return",
"\"Failed to launch app with name '%s'\" % name app",
"# launch the gecko instance attached to marionette self.marionette.instance.start() elif",
"= None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self, *args, **kwargs): self.iterations",
"bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result",
"sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return",
"out some memory status info self.marionette.log(\"checkpoint\") self.cur_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())",
"self.log_name) # Open the checkpoint file checkpoint_file = open(self.log_name, 'r')",
"timeout while time.time() < end_time: try: value = method(self.marionette) if",
".json self.marionette.log(\"processing checkpoint data from %s\" % self.log_name) # Open",
"GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def",
"'_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s' % destination, 'of=%s' %",
"not False.\"\"\" end_time = time.time() + timeout while time.time() <",
"self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else: raise Exception('Unable to stop B2G') self.marionette.client.close()",
"give extra debugging information if isinstance(e, NoSuchElementException): raise TimeoutException('Element %s",
"self.marionette.switch_to_frame() js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) result =",
"%s' % contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 *",
"json value = json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' %",
"to give extra debugging information if isinstance(e, NoSuchElementException): raise TimeoutException('Element",
"to suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path suite_summary_file",
"is the default orientation), landscape-primary, portrait-secondary and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\"",
"self.marionette.log(\"%s iteration %d of %d\" % (self.test_method.__name__, count, self.iterations)) #",
"(number, message), special_powers=True) assert result, 'Unable to send SMS to",
"dm_type == 'sut': host = os.environ.get('TEST_DEVICE') if not host: raise",
"marionette.errors import ElementNotVisibleException from marionette.errors import TimeoutException from marionette.errors import",
"# Close the summary file summary_file.close() # Write to suite",
"# Print to console so can see what iteration we're",
"bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame()",
"result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert result, \"Failed to",
"a Gaia setting.\"\"\" return self._set_pref('Char', name, value) def set_volume(self, value):",
"= [] for next_line in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) #",
"self.device.is_online: if self.testvars.get('wifi') and self.device.has_wifi: self.data_layer.connect_to_wifi() assert self.device.is_online else: raise",
"loaded); marionetteScriptFinished(); } });\"\"\", script_timeout=60000) # TODO: Remove this sleep",
"= float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5) try:",
"after timeout' % locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout",
"wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls the method provided",
"import sys import time from marionette import MarionetteTestCase from marionette.by",
"self.iteration in (0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if not os.path.exists(self.checkpoint_path):",
"in now else: def check(now): return url in now while",
"end_time: try: value = method(self.marionette) if value: return value except",
"gecko instance attached to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g'])",
"self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False def tearDown(self): self.lockscreen",
"[] for next_line in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close",
"os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return",
"= open(summary_name, 'w') # Write the summarized checkpoint data summary_file.write('test_name:",
"which is different from a Gaia setting.\"\"\" return self._get_pref('Bool', name)",
"which is different from a Gaia setting.\"\"\" return self._get_pref('Char', name)",
"states which the phone can be passed in: portrait-primary(which is",
"\" + \"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self): return self.marionette.execute_async_script( 'return",
"self.frame = frame self.frame_id = frame self.src = src self.name",
"} else { var expected = arguments[1]; window.screen.onmozorientationchange = function(e)",
"% locator) def wait_for_element_not_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout)",
"from %s\" % self.log_name) # Open the checkpoint file checkpoint_file",
"host = os.environ.get('TEST_DEVICE') if not host: raise Exception('Must specify host",
"the current app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card' % self.app_under_test.lower()",
"window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.enableWiFi()\", special_powers=True) assert",
"raise Exception('Device manager is only available for devices.') dm_type =",
"= os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb': self._manager = mozdevice.DeviceManagerADB()",
"return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name)) def set_permission(self, app_name,",
"switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout)",
"'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build'] @property def is_online(self): # Returns",
"return result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True) def",
"= src self.name = name self.origin = origin def __eq__(self,",
"self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()') def",
"self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result, 'Unable to connect to",
"subject to the terms of the Mozilla Public # License,",
"src self.name = name self.origin = origin def __eq__(self, other):",
"4 orientation states which the phone can be passed in:",
"which the phone can be passed in: portrait-primary(which is the",
"or self.testvars.get('wifi') assert network, 'No WiFi network provided' self.marionette.switch_to_frame() return",
"for next_line in checkpoint_file: if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the",
"the checkpoint file checkpoint_file.close() # Calculate the average b2g_rss total",
"return self._get_pref('Char', name) def set_char_pref(self, name, value): \"\"\"Sets the value",
"app with name '%s'\" % app.name def kill_all(self): self.marionette.switch_to_frame() js",
"timeout: time.sleep(0.5) try: if not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException:",
"app_name) def uninstall(self, name): self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self,",
"marionette.errors import StaleElementException from marionette.errors import InvalidResponseException import mozdevice class",
"to False self.data_layer.set_setting('edgesgesture.enabled', False) # disable carrier data connection if",
"\"Failed to kill app with name '%s'\" % app.name def",
"name), special_powers=True) def _set_pref(self, datatype, name, value): value = json.dumps(value)",
"to insert contact %s' % contact def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame()",
"result, 'Unable to unlock screen' class GaiaApp(object): def __init__(self, origin=None,",
"channel in channels: self.set_setting('audio.volume.%s' % channel, value) def bluetooth_enable(self): self.marionette.switch_to_frame()",
"value) def get_char_pref(self, name): \"\"\"Returns the value of a Gecko",
"or None if no active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property",
"self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency') @property def",
"value): return self.marionette.execute_async_script(\"return GaiaApps.setPermission('%s', '%s', '%s')\" % (app_name, permission_name, value))",
"def lock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.lock()') assert result, 'Unable to",
"def picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return",
"% (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as",
"passed in: portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and",
"self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable to disable cell data'",
"origin=result.get('origin')) if app.frame_id is None: raise Exception(\"App failed to launch;",
"window.navigator.mozMobileConnection || ' + 'window.navigator.mozMobileConnections && ' + 'window.navigator.mozMobileConnections[0]; '",
"window.screen.onmozorientationchange = null; marionetteScriptFinished(); }, function() { return window.screen.mozOrientation ===",
"checkpoint data summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n' % self.cur_time)",
"else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result, 'Able to set",
"set device's bluetooth name to %s\" % device_name def bluetooth_set_device_discoverable_mode(self,",
"screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # kill any open apps self.apps.kill_all() #",
"timeout = float(timeout) + time.time() while time.time() < timeout: time.sleep(0.5)",
"StaleElementException from marionette.errors import InvalidResponseException import mozdevice class LockScreen(object): def",
"unlock(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock",
"return time.sleep(2) raise TimeoutException('Could not switch to app frame %s",
"assert network, 'No WiFi network provided' self.marionette.switch_to_frame() return self.marionette.execute_script(\"return GaiaDataLayer.isWiFiConnected(%s)\"",
"path self.device.manager.removeFile(filename) # Switch off keyboard FTU screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False)",
"is_cell_data_enabled(self): return self.get_setting('ril.data.enabled') def connect_to_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToCellData()\",",
"= [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame()",
"GaiaDataLayer.connectToCellData()\", special_powers=True) assert result, 'Unable to connect to cell data'",
"radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge gestures",
"self.is_android_build: raise Exception('Device manager is only available for devices.') dm_type",
"result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable to disable",
"len(self.all_contacts))) result = self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to",
"%d of %d...\" % (count, self.iterations) sys.stdout.flush() self.test_method() # Checkpoint",
"count=1, destination=''): self.device.push_file(self.resource(filename), count, '/'.join(['sdcard', destination])) def resource(self, filename): return",
"(aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded);",
"Write to suite summary file suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path",
"= os.environ.get('TEST_DEVICE') if not host: raise Exception('Must specify host with",
"src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is None: raise Exception(\"App failed",
"(By.ID, 'app-install-install-button') mk = {\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \",",
"and self._manager: return self._manager if not self.is_android_build: raise Exception('Device manager",
"InvalidResponseException import mozdevice class LockScreen(object): def __init__(self, marionette): self.marionette =",
"import By from marionette.errors import NoSuchElementException from marionette.errors import ElementNotVisibleException",
"% json.dumps(network)) @property def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def",
"%s\" % device_name def bluetooth_set_device_discoverable_mode(self, discoverable): if (discoverable): result =",
"time.time() while time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator) except",
"network = network or self.testvars.get('wifi') assert network, 'No WiFi network",
"before restore settings from testvars self.data_layer.set_setting('lockscreen.passcode-lock.code', '1111') self.data_layer.set_setting('lockscreen.passcode-lock.enabled', False) #",
"locator) def wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) +",
"self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w') # Write the summarized",
"GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns the state of only",
"WiFi network' def forget_all_networks(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None):",
"def __init__(self, origin=None, name=None, frame=None, src=None): self.frame = frame self.frame_id",
"= time.time() if not url: def check(now): return \"about:blank\" not",
"*args, **kwargs): self.iterations = kwargs.pop('iterations') or 1 self.checkpoint_interval = kwargs.pop('checkpoint_interval')",
"= ('css selector', locator_part_two) close_card_app_button = self.marionette.find_element(*_close_button_locator) close_card_app_button.tap() def process_checkpoint_data(self):",
"a Gaia setting.\"\"\" return self._get_pref('Bool', name) def set_bool_pref(self, name, value):",
"count > 1: for i in range(1, count + 1):",
"cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True)",
"Gaia setting.\"\"\" return self._set_pref('Char', name, value) def set_volume(self, value): channels",
"0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count",
"file checkpoint_file = open(self.log_name, 'r') # Grab every b2g rss",
"self.marionette.execute_async_script('return GaiaDataLayer.removeAllContacts();', special_powers=True) assert result, 'Unable to remove all contacts'",
"self.device.is_online: try: self.connect_to_local_area_network() except: if self.device.has_mobile_connection: self.data_layer.connect_to_cell_data() else: raise Exception('Unable",
"when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator) def connect_to_network(self): if",
"[filename for filename in files if filename.endswith(extension)] return files def",
"no active call return self.marionette.execute_script(\"return GaiaDataLayer.getMozTelephonyState()\") @property def is_antenna_available(self): return",
"+ 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell', 'dd', 'if=%s'",
"window.screen.width') @property def screen_orientation(self): return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by,",
"LockScreen(object): def __init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__,",
"summary text file summary_name = self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name,",
"to kill app with name '%s'\" % app.name def kill_all(self):",
"if not self.is_android_build: raise Exception('Device manager is only available for",
"iteration we're on while test is running if self.iteration ==",
"name=None, frame=None, src=None): self.frame = frame self.frame_id = frame self.src",
"% self.checkpoint_interval) summary_file.write('b2g_rss: ') summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss)",
"GaiaDataLayer.forgetAllNetworks()') def is_wifi_connected(self, network=None): network = network or self.testvars.get('wifi') assert",
"window.navigator.mozTelephony; \" + \"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self): return self.marionette.execute_async_script(",
"English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off spanish keyboard before test",
"raise Exception('Unknown device manager type: %s' % dm_type) return self._manager",
"'a') as log_file: log_file.write('%s Gaia Endurance Test: %s\\n' % (self.cur_time,",
"%s with text %s' % (number, message) class GaiaDevice(object): def",
"GaiaTestCase.__init__(self, *args, **kwargs) def drive(self, test, app): self.test_method = test",
"in: portrait-primary(which is the default orientation), landscape-primary, portrait-secondary and landscape-secondary",
"not destination.count('.') > 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source,",
"self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();')",
"reset to default earlier in the setUp if not self.restart:",
"name): \"\"\"Returns the value of a Gecko integer pref, which",
"Remove this sleep when Bug 924912 is addressed time.sleep(5) def",
"file, You can obtain one at http://mozilla.org/MPL/2.0/. import json import",
"locator) def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition timed out\"): \"\"\"Calls the",
"before timeout' % locator) else: raise TimeoutException('Element %s present but",
"def checkpoint(self): # Console output so know what's happening if",
"if watching console print \"Checkpoint...\" sys.stdout.flush() # Sleep to give",
"if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app @property def displayed_app(self): self.marionette.switch_to_frame()",
"result.extend(self.video_files) return result def delete_all_sms(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.deleteAllSms();\", special_powers=True)",
"'_has_wifi'): self._has_wifi = self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined') return self._has_wifi def",
"assert result, 'Unable to connect to cell data' def disable_cell_data(self):",
"file summary_file.close() # Write to suite summary file suite_summary_file_name =",
"get_bool_pref(self, name): \"\"\"Returns the value of a Gecko boolean pref,",
"from a Gaia setting.\"\"\" return self._get_pref('Bool', name) def set_bool_pref(self, name,",
"def get_char_pref(self, name): \"\"\"Returns the value of a Gecko string",
"break except StaleElementException: pass except NoSuchElementException: break else: raise TimeoutException(",
"is_online(self): # Returns true if the device has a network",
"destination='', progress=None): if not destination.count('.') > 0: destination = '/'.join([destination,",
"connection if self.device.has_mobile_connection: self.data_layer.disable_cell_data() self.data_layer.disable_cell_roaming() if self.device.has_wifi: self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi()",
"pass time.sleep(0.5) else: raise TimeoutException(message) def is_element_present(self, by, locator): try:",
"connect_to_wifi(self, network=None): network = network or self.testvars.get('wifi') assert network, 'No",
"def drive(self, test, app): self.test_method = test self.app_under_test = app",
"/ len(b2g_rss_list) # Create a summary text file summary_name =",
"if not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass except NoSuchElementException:",
"def _set_pref(self, datatype, name, value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\"",
"not url: def check(now): return \"about:blank\" not in now else:",
"%s in time' % app_frame) class GaiaData(object): def __init__(self, marionette,",
"special_powers=True) assert result, \"Unable to change setting with name '%s'",
"= mozdevice.DeviceManagerADB() elif dm_type == 'sut': host = os.environ.get('TEST_DEVICE') if",
"device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set edge",
"system app object when we have one self.wait_for_element_displayed(*_yes_button_locator) self.marionette.find_element(*_yes_button_locator).tap() self.wait_for_element_not_displayed(*_yes_button_locator)",
"# enable the device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False)",
"home button self.marionette.switch_to_frame() self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('home'));\") # Bring up the cards",
"def remove_all_contacts(self, default_script_timeout=60000): self.marionette.switch_to_frame() self.marionette.set_script_timeout(max(default_script_timeout, 1000 * len(self.all_contacts))) result =",
"% self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n' % self.iterations) summary_file.write('checkpoint_interval: %d\\n' % self.checkpoint_interval)",
"self.device.is_android_build: # revert device to a clean state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla')",
"@property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();', special_powers=True) def insert_contact(self,",
"def get_int_pref(self, name): \"\"\"Returns the value of a Gecko integer",
"time.time() < end_time: try: value = method(self.marionette) if value: return",
"0) or count == self.iterations: self.checkpoint() # Finished, now process",
"= self.marionette.execute_async_script(\"GaiaApps.launchWithName('%s')\" % name, script_timeout=launch_timeout) assert result, \"Failed to launch",
"is no app frame\") if switch_to_frame: self.switch_to_frame(app.frame_id, url) return app",
"window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property def screen_width(self): return self.marionette.execute_script('return window.screen.width')",
"all tests passing while introducing multi-sim APIs return self.marionette.execute_script('var mobileConnection",
"app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" % (app_name, permission_name)) def",
"self.marionette.log(\"processing checkpoint data from %s\" % self.log_name) # Open the",
"= None self.data_layer = None MarionetteTestCase.tearDown(self) class GaiaEnduranceTestCase(GaiaTestCase): def __init__(self,",
"self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can be really slow!",
"self.testvars = testvars or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms',",
"multi-sim APIs return self.marionette.execute_script('var mobileConnection = window.navigator.mozMobileConnection || ' +",
"the marketplace dev app self.marionette.execute_script('navigator.mozApps.install(\"%s\")' % mk['manifest']) # TODO add",
"return self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return",
"of a Gecko string pref, which is different from a",
"# Dump out some memory status info self.marionette.log(\"checkpoint\") self.cur_time =",
"+ 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True)",
"__init__(self, marionette): self.marionette = marionette js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms',",
"locator, timeout=_default_timeout): timeout = float(timeout) + time.time() e = None",
"already if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path = \"checkpoints\" if",
"Gaia setting.\"\"\" return self._get_pref('Char', name) def set_char_pref(self, name, value): \"\"\"Sets",
"app @property def displayed_app(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script('return GaiaApps.displayedApp();') return",
"print \"Iteration %d of %d...\" % (count, self.iterations) sys.stdout.flush() self.test_method()",
"self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return self.marionette.execute_script(\"return GaiaApps.getRunningApps()\") def switch_to_frame(self, app_frame,",
"'b2g']) else: raise Exception('Unable to start B2G') self.marionette.wait_for_port() self.marionette.start_session() if",
"% app_frame) class GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette =",
"value) def bluetooth_enable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame()",
"= self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(true);') else: result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceDiscoverableMode(false);') assert result,",
"channels = ['alarm', 'content', 'notification'] for channel in channels: self.set_setting('audio.volume.%s'",
"Event('home'));\") # kill any open apps self.apps.kill_all() # disable sound",
"result, \"Failed to kill app with name '%s'\" % app.name",
"self.data_layer.enable_wifi() self.data_layer.forget_all_networks() self.data_layer.disable_wifi() # remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to",
"Form is subject to the terms of the Mozilla Public",
"js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) def get_permission(self, app_name,",
"test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do not track pref back",
"return window.screen.mozOrientation === expected; } ); }; console.log(\"Changing orientation to",
"self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.enableBluetooth()\") def bluetooth_disable(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\")",
"False) # Change language back to English self.data_layer.set_setting(\"language.current\", \"en-US\") #",
"# unlock self.lockscreen.unlock() # If we are restarting all of",
"'\" + arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property",
"from marionette.errors import ElementNotVisibleException from marionette.errors import TimeoutException from marionette.errors",
"value of a Gecko string pref, which is different from",
"self.device.is_android_build: for filename in self.data_layer.media_files: # filename is a fully",
"os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename)) def change_orientation(self, orientation): \"\"\" There are 4",
"# Switch off spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) #",
"time' % app_frame) class GaiaData(object): def __init__(self, marionette, testvars=None): self.marionette",
"MarionetteTestCase from marionette.by import By from marionette.errors import NoSuchElementException from",
"name): \"\"\"Returns the value of a Gecko string pref, which",
"def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self):",
"so can see what iteration we're on while test is",
"in self.testvars.get('settings', {}).items()] # unlock self.lockscreen.unlock() # If we are",
"= GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id is None: raise",
"to disable WiFi' def connect_to_wifi(self, network=None): network = network or",
"= None class GaiaTestCase(MarionetteTestCase): _script_timeout = 60000 _search_timeout = 10000",
"url=None, timeout=30): self.marionette.switch_to_frame(app_frame) start = time.time() if not url: def",
"self.cleanUp() def cleanUp(self): # remove media if self.device.is_android_build: for filename",
"self.testvars = testvars or {} @property def manager(self): if hasattr(self,",
"'sut': host = os.environ.get('TEST_DEVICE') if not host: raise Exception('Must specify",
"GaiaDataLayer.unpairAllBluetoothDevices()') def bluetooth_set_device_name(self, device_name): result = self.marionette.execute_async_script('return GaiaDataLayer.bluetoothSetDeviceName(%s);' % device_name)",
"GaiaDataLayer.deleteAllSms();\", special_powers=True) def delete_all_call_log_entries(self): \"\"\"The call log needs to be",
"self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as log_file: log_file.write('%s Gaia Endurance",
"name): return self.marionette.execute_async_script('return GaiaDataLayer.getSetting(\"%s\")' % name, special_powers=True) @property def all_settings(self):",
"language back to English self.data_layer.set_setting(\"language.current\", \"en-US\") # Switch off spanish",
"= ('id', 'cards-view') self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new Event('holdhome'));\") self.wait_for_element_displayed(*_cards_view_locator) # Sleep a bit",
"&& ' + 'window.navigator.mozMobileConnections[0]; ' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self):",
"GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for filename in files if",
"TimeoutException('Could not switch to app frame %s in time' %",
"test is running if self.iteration == 1: print \"\\n\" print",
"destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count >",
"device manager type: %s' % dm_type) return self._manager @property def",
"If first checkpoint, create the file if it doesn't exist",
"filename in files if filename.endswith(extension)] return files def send_sms(self, number,",
"1: print \"\\n\" print \"Iteration %d of %d...\" % (count,",
"music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property def picture_files(self): return self.marionette.execute_async_script(",
"Open the checkpoint file checkpoint_file = open(self.log_name, 'r') # Grab",
"name, value) def get_char_pref(self, name): \"\"\"Returns the value of a",
"value): \"\"\"Sets the value of a Gecko integer pref, which",
"origin=None, name=None, frame=None, src=None): self.frame = frame self.frame_id = frame",
"window.addEventListener('mozbrowserloadend', function loaded(aEvent) { if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen')",
"= json.dumps(value) result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value),",
"js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_lock_screen.js\")) self.marionette.import_script(js) @property def is_locked(self):",
"def known_networks(self): return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()') @property def active_telephony_state(self): # Returns",
"result = self.marionette.execute_async_script(\"return GaiaDataLayer.connectToWiFi(%s)\" % json.dumps(network)) assert result, 'Unable to",
"source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination) if count > 1: for i",
"if progress: progress.update(i) self.manager.removeFile(destination) def restart_b2g(self): self.stop_b2g() time.sleep(2) self.start_b2g() def",
"instance attached to marionette self.marionette.instance.start() elif self.is_android_build: self.manager.shellCheckOutput(['start', 'b2g']) else:",
"\"\"\"Returns the value of a Gecko string pref, which is",
"not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass except NoSuchElementException: break",
"only the currently active call or None if no active",
"return self.marionette.execute_async_script(\"return GaiaDataLayer.disableBluetooth()\") def bluetooth_pair_device(self, device_name): return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' %",
"with this # file, You can obtain one at http://mozilla.org/MPL/2.0/.",
"+ arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property def",
"testvars=None): self.marionette = marionette self.testvars = testvars or {} @property",
"Test: %s\\n' % (self.cur_time, self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name,",
"is different from a Gaia setting.\"\"\" return self._get_pref('Char', name) def",
"to the default self.data_layer.set_setting('privacy.donottrackheader.value', '-1') if self.data_layer.get_setting('ril.radio.disabled'): # enable the",
"time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance: # launch the gecko",
"summary_name = self.log_name.replace('.log', '_summary.log') summary_file = open(summary_name, 'w') # Write",
"result, 'Unable to send SMS to recipient %s with text",
"change setting with name '%s' to '%s'\" % (name, value)",
"%s)' % (number, message), special_powers=True) assert result, 'Unable to send",
"= self.marionette.execute_async_script('GaiaLockScreen.unlock()') assert result, 'Unable to unlock screen' class GaiaApp(object):",
"not os.path.exists(self.checkpoint_path): os.makedirs(self.checkpoint_path, 0755) self.log_name = \"%s/checkpoint_%s_%s.log\" % (self.checkpoint_path, self.test_method.__name__,",
"% app.origin) assert result, \"Failed to kill app with name",
"% mk['manifest']) # TODO add this to the system app",
"e: pass else: # This is an effortless way to",
"for filename in self.data_layer.media_files: # filename is a fully qualified",
"to launch app with name '%s'\" % name app =",
"' + 'return mobileConnection.data.connected;') def enable_cell_roaming(self): self.set_setting('ril.data.roaming_enabled', True) def disable_cell_roaming(self):",
"device_name) assert result, \"Unable to set device's bluetooth name to",
"by, locator): try: self.marionette.find_element(by, locator) return True except: return False",
"idle_time = 30 self.marionette.log(\"sleeping %d seconds to give the device",
"import os import sys import time from marionette import MarionetteTestCase",
"which is different from a Gaia setting.\"\"\" return self._set_pref('Char', name,",
"_set_pref(self, datatype, name, value): value = json.dumps(value) self.marionette.execute_script(\"SpecialPowers.set%sPref('%s', %s);\" %",
"the device radio, disable Airplane mode self.data_layer.set_setting('ril.radio.disabled', False) # Re-set",
"Mozilla Public # License, v. 2.0. If a copy of",
"watching console print \"Checkpoint...\" sys.stdout.flush() # Sleep to give device",
"return \"about:blank\" not in now else: def check(now): return url",
"class GaiaDevice(object): def __init__(self, marionette, testvars=None): self.marionette = marionette self.testvars",
"these values are reset to default earlier in the setUp",
"to console so can see what iteration we're on while",
"the value of a Gecko boolean pref, which is different",
"check(now): return url in now while (time.time() - start <",
"(self.checkpoint_path, self.test_method.__name__, self.cur_time) with open(self.log_name, 'a') as log_file: log_file.write('%s Gaia",
"% device_name) assert result, \"Unable to set device's bluetooth name",
"try: self.marionette.find_element(by, locator) except NoSuchElementException: break else: raise TimeoutException( 'Element",
"= self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\", special_powers=True) assert result, 'Unable to disable cell",
"Exception('Unable to connect to network') assert self.device.is_online def connect_to_local_area_network(self): if",
"a network connection established (cell data, wifi, etc) return self.marionette.execute_script('return",
"'.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) # Close the summary file",
"self.test_method.__name__)) output_str = self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as log_file: log_file.write('%s",
"Switch off spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set",
"not switch to app frame %s in time' % app_frame)",
"dm_type = os.environ.get('DM_TRANS', 'adb') if dm_type == 'adb': self._manager =",
"break else: raise TimeoutException( 'Element %s still present after timeout'",
"not in now else: def check(now): return url in now",
"self.testvars.get('is_android_build') is None: self.testvars['is_android_build'] = 'Android' in self.marionette.session_capabilities['platform'] return self.testvars['is_android_build']",
"def is_element_displayed(self, by, locator): try: return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException,",
"as e: pass else: # This is an effortless way",
"testvars or {} @property def manager(self): if hasattr(self, '_manager') and",
"screen self.data_layer.set_setting(\"keyboard.ftu.enabled\", False) # restore settings from testvars [self.data_layer.set_setting(name, value)",
"arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation]) @property def screen_width(self):",
"network connection established (cell data, wifi, etc) return self.marionette.execute_script('return window.navigator.onLine;')",
"self.set_setting('ril.data.roaming_enabled', False) @property def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self):",
"isinstance(e, NoSuchElementException): raise TimeoutException('Element %s not present before timeout' %",
"1 self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations GaiaTestCase.__init__(self, *args, **kwargs) def",
"def disable_wifi(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableWiFi()\", special_powers=True) assert result,",
"is None: raise Exception(\"App failed to launch; there is no",
"summarized checkpoint data summary_file.write('test_name: %s\\n' % self.test_method.__name__) summary_file.write('completed: %s\\n' %",
"= {\"name\": \"Marketplace Dev\", \"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if not",
"assert result, \"Failed to launch app with name '%s'\" %",
"if next_line.startswith(\"b2g\"): b2g_rss_list.append(next_line.split()[5]) # Close the checkpoint file checkpoint_file.close() #",
"log_file.write('%s\\n' % output_str) def close_app(self): # Close the current app",
"value): channels = ['alarm', 'content', 'notification'] for channel in channels:",
"range(1, count + 1): remote_copy = '_%s.'.join(iter(destination.split('.'))) % i self.manager._checkCmd(['shell',",
"= os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_apps.js\")) self.marionette.import_script(js) self.marionette.execute_async_script(\"GaiaApps.killAll()\") def runningApps(self): return",
"are reset to default earlier in the setUp if not",
"'adb': self._manager = mozdevice.DeviceManagerADB() elif dm_type == 'sut': host =",
"\"about:blank\" not in now else: def check(now): return url in",
"destination.count('.') > 0: destination = '/'.join([destination, source.rpartition(os.path.sep)[-1]]) self.manager.mkDirs(destination) self.manager.pushFile(source, destination)",
"@property def screen_width(self): return self.marionette.execute_script('return window.screen.width') @property def screen_orientation(self): return",
"the terms of the Mozilla Public # License, v. 2.0.",
"send SMS to recipient %s with text %s' % (number,",
"to set device's bluetooth name to %s\" % device_name def",
"self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout = float(timeout)",
"\"if(telephony.active) telephony.active.hangUp();\") @property def music_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllMusic();') @property",
"self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self): # remove media if",
"is used to keep all tests passing while introducing multi-sim",
"kwargs.pop('iterations', None) kwargs.pop('checkpoint_interval', None) MarionetteTestCase.__init__(self, *args, **kwargs) def setUp(self): try:",
"off spanish keyboard before test self.data_layer.set_setting(\"keyboard.layouts.spanish\", False) # Set do",
"% name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if app.frame_id",
"result, 'Unable to disable WiFi' def connect_to_wifi(self, network=None): network =",
"visible after timeout' % locator) def wait_for_condition(self, method, timeout=_default_timeout, message=\"Condition",
"the file if it doesn't exist already if self.iteration in",
"def is_wifi_enabled(self): return self.marionette.execute_script(\"return window.navigator.mozWifiManager.enabled;\") def enable_wifi(self): self.marionette.switch_to_frame() result =",
"log_file: log_file.write('%s Checkpoint after iteration %d of %d:\\n' % (self.cur_time,",
"window.navigator.mozFMRadio.frequency') @property def media_files(self): result = [] result.extend(self.music_files) result.extend(self.picture_files) result.extend(self.video_files)",
"permission_name, value)) def launch(self, name, switch_to_frame=True, url=None, launch_timeout=None): self.marionette.switch_to_frame() result",
"B2G') self.marionette.wait_for_port() self.marionette.start_session() if self.is_android_build: self.marionette.execute_async_script(\"\"\" window.addEventListener('mozbrowserloadend', function loaded(aEvent) {",
"assert result, 'Unable to insert contact %s' % contact def",
"-1 || aEvent.target.src.indexOf('homescreen') != -1) { window.removeEventListener('mozbrowserloadend', loaded); marionetteScriptFinished(); }",
"and landscape-secondary \"\"\" self.marionette.execute_async_script(\"\"\" if (arguments[0] === arguments[1]) { marionetteScriptFinished();",
"while time.time() < timeout: time.sleep(0.5) try: if self.marionette.find_element(by, locator).is_displayed(): break",
"%s not present before timeout' % locator) else: raise TimeoutException('Element",
"summary_file.write('completed: %s\\n' % self.cur_time) summary_file.write('app_under_test: %s\\n' % self.app_under_test.lower()) summary_file.write('total_iterations: %d\\n'",
"raise TimeoutException('Element %s present but not displayed before timeout' %",
"self._get_pref('Char', name) def set_char_pref(self, name, value): \"\"\"Sets the value of",
"StaleElementException: pass except NoSuchElementException: break else: raise TimeoutException( 'Element %s",
"setting.\"\"\" return self._set_pref('Int', name, value) def get_char_pref(self, name): \"\"\"Returns the",
"present but not displayed before timeout' % locator) def wait_for_element_not_displayed(self,",
"Gaia setting.\"\"\" return self._get_pref('Bool', name) def set_bool_pref(self, name, value): \"\"\"Sets",
"\"manifest\": \"https://marketplace-dev.allizom.org/manifest.webapp \", } if not self.apps.is_app_installed(mk['name']): # install the",
"+ time.time() while time.time() < timeout: time.sleep(0.5) try: self.marionette.find_element(by, locator)",
"{ if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) {",
"etc) return self.marionette.execute_script('return window.navigator.onLine;') @property def has_mobile_connection(self): # XXX: check",
"not distributed with this # file, You can obtain one",
"== 1: print \"\\n\" print \"Iteration %d of %d...\" %",
"state self.device.manager.removeDir('/data/local/storage/persistent') self.device.manager.removeDir('/data/b2g/mozilla') self.device.start_b2g() # the emulator can be really",
"return self.marionette.find_element(by, locator).is_displayed() except (NoSuchElementException, ElementNotVisibleException): return False def tearDown(self):",
"text %s' % (number, message) class GaiaDevice(object): def __init__(self, marionette,",
"icon for the current app locator_part_two = '#cards-view li.card[data-origin*=\"%s\"] .close-card'",
"copy of the MPL was not distributed with this #",
"while time.time() < timeout: time.sleep(0.5) try: return self.marionette.find_element(by, locator) except",
"\\ return value is not False.\"\"\" end_time = time.time() +",
"len(extension): return [filename for filename in files if filename.endswith(extension)] return",
"self._get_pref('Int', name) def set_int_pref(self, name, value): \"\"\"Sets the value of",
"out\"): \"\"\"Calls the method provided with the driver as an",
"to cell data' def disable_cell_data(self): self.marionette.switch_to_frame() result = self.marionette.execute_async_script(\"return GaiaDataLayer.disableCellData()\",",
"self.iteration = count self.marionette.log(\"%s iteration %d of %d\" % (self.test_method.__name__,",
"assert result, 'Able to set the device bluetooth discoverable mode'",
"import NoSuchElementException from marionette.errors import ElementNotVisibleException from marionette.errors import TimeoutException",
"return self.__dict__ == other.__dict__ class GaiaApps(object): def __init__(self, marionette): self.marionette",
"count == self.iterations: self.checkpoint() # Finished, now process checkpoint data",
"['alarm', 'content', 'notification'] for channel in channels: self.set_setting('audio.volume.%s' % channel,",
"self.marionette.execute_async_script( 'return GaiaDataLayer.getAllVideos();') def sdcard_files(self, extension=''): files = self.marionette.execute_async_script( 'return",
"# Returns true if the device has a network connection",
"GaiaDataLayer.disableWiFi()\", special_powers=True) assert result, 'Unable to disable WiFi' def connect_to_wifi(self,",
"instance attached to marionette self.marionette.instance.close() elif self.is_android_build: self.manager.shellCheckOutput(['stop', 'b2g']) else:",
"self.marionette.switch_to_frame() self.marionette.execute_async_script(\"GaiaApps.uninstallWithName('%s')\" % name) def kill(self, app): self.marionette.switch_to_frame() js =",
"picture_files(self): return self.marionette.execute_async_script( 'return GaiaDataLayer.getAllPictures();') @property def video_files(self): return self.marionette.execute_async_script(",
"has_mobile_connection(self): # XXX: check bug-926169 # this is used to",
"from testvars [self.data_layer.set_setting(name, value) for name, value in self.testvars.get('settings', {}).items()]",
"return self.marionette.execute_script('return window.screen.mozOrientation') def wait_for_element_present(self, by, locator, timeout=_default_timeout): timeout =",
"name self.origin = origin def __eq__(self, other): return self.__dict__ ==",
"else: raise TimeoutException( 'Element %s still visible after timeout' %",
"try: if not self.marionette.find_element(by, locator).is_displayed(): break except StaleElementException: pass except",
"mobileConnection !== undefined') @property def has_wifi(self): if not hasattr(self, '_has_wifi'):",
"name, special_powers=True) @property def all_settings(self): return self.get_setting('*') def set_setting(self, name,",
"# remove data self.data_layer.remove_all_contacts(self._script_timeout) # reset to home screen self.marionette.execute_script(\"window.wrappedJSObject.dispatchEvent(new",
"doesn't exist already if self.iteration in (0, self.checkpoint_interval): self.checkpoint_path =",
"'return GaiaDataLayer.getAllSDCardFiles();') if len(extension): return [filename for filename in files",
"marionette self.testvars = testvars or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir,",
"GaiaDataLayer.setSetting(\"%s\", %s)' % (name, value), special_powers=True) assert result, \"Unable to",
"else: raise Exception('Unable to connect to network') assert self.device.is_online def",
"with SUT!') self._manager = mozdevice.DeviceManagerSUT(host=host) else: raise Exception('Unknown device manager",
"import StaleElementException from marionette.errors import InvalidResponseException import mozdevice class LockScreen(object):",
"self.device.manager.shellCheckOutput([\"b2g-ps\"]) with open(self.log_name, 'a') as log_file: log_file.write('%s Checkpoint after iteration",
"wait_for_element_displayed(self, by, locator, timeout=_default_timeout): timeout = float(timeout) + time.time() e",
"'%s'\" % name app = GaiaApp(frame=result.get('frame'), src=result.get('src'), name=result.get('name'), origin=result.get('origin')) if",
"# Bring up the cards view _cards_view_locator = ('id', 'cards-view')",
"testvars or {} js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', \"gaia_data_layer.js\")) self.marionette.import_script(js)",
"summary_file.write(', '.join(b2g_rss_list)) summary_file.write('\\navg_rss: %d\\n\\n' % avg_rss) # Close the summary",
"return self.marionette.execute_async_script('return GaiaDataLayer.pairBluetoothDevice(\"%s\")' % device_name) def bluetooth_unpair_all_devices(self): self.marionette.switch_to_frame() self.marionette.execute_async_script('return GaiaDataLayer.unpairAllBluetoothDevices()')",
"to '\" + arguments[1] + \"'.\"); window.screen.mozLockOrientation(arguments[1]); };\"\"\", script_args=[self.screen_orientation, orientation])",
"self.stop_b2g() time.sleep(2) self.start_b2g() def start_b2g(self): if self.marionette.instance: # launch the",
"+ time.time() while time.time() < timeout: time.sleep(0.5) try: if not",
"is different from a Gaia setting.\"\"\" return self._get_pref('Int', name) def",
"> 1: for i in range(1, count + 1): remote_copy",
"http://mozilla.org/MPL/2.0/. import json import os import sys import time from",
"\"\\n\" print \"Iteration %d of %d...\" % (count, self.iterations) sys.stdout.flush()",
"self.marionette.execute_async_script('return GaiaDataLayer.getAllContacts();', special_powers=True) @property def sim_contacts(self): self.marionette.switch_to_frame() return self.marionette.execute_async_script('return GaiaDataLayer.getSIMContacts();',",
"def change_orientation(self, orientation): \"\"\" There are 4 orientation states which",
"open(self.log_name, 'r') # Grab every b2g rss reading for each",
"gaiatest.apps.keyboard.app import Keyboard self.keyboard = Keyboard(self.marionette) self.cleanUp() def cleanUp(self): #",
"self.marionette.import_script(js) def get_permission(self, app_name, permission_name): return self.marionette.execute_async_script(\"return GaiaApps.getPermission('%s', '%s')\" %",
"def is_fm_radio_enabled(self): return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled') @property def fm_radio_frequency(self): return self.marionette.execute_script('return",
"source, count=1, destination='', progress=None): if not destination.count('.') > 0: destination"
] |
[
"else if (current.isSameNode(child)) { return count; } if (current.nodeType ==",
"inconsistencies # // on whether the calculations are inside or",
"(e.code == 4) { # return 0; # } #",
"what is in GWT 1.5 for getAbsoluteLeft. err... #\"\"\" #",
"= elem.parentNode; while (parent) { if (parent.scrollLeft > 0) {",
"if (parent == child) { return true; } } else",
"left = $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while (parent) {",
"1)) { child = null; } } return false; \"\"\")",
"( typeof elem.getBoundingClientRect == 'function' ) { var left =",
"// values when overflow is not visible. We have to",
"{ if (parent.scrollTop > 0) { top -= parent.scrollTop; }",
"fixed before they release, so this can # // be",
"1.5 for getAbsoluteTop. err... #\"\"\" # // We cannot use",
"$doc.documentElement.scrollLeft; } // Older Firefox can use getBoxObjectFor else {",
"in GWT 1.5 for getAbsoluteTop. err... #\"\"\" # // We",
"try { # return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # }",
"erroneous # // values when overflow is not visible. We",
"{ # // This works around a bug in the",
"} try { child = child.parentNode; } catch(e) { //",
"denied to get property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return",
"left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older Firefox can",
"== 2) { return 4; } else if (button ==",
"left - parent.scrollLeft; } parent = parent.parentNode; } return left",
"works around a bug in the FF3 betas. The bug",
"= elem.parentNode; while (parent) { if (parent.scrollTop > 0) {",
"DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode) { if (elem ==",
") { var left = Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft",
"false; \"\"\") def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem != null) &&",
"= button.ownerDocument; if (doc != null) { var evt =",
"== 'function' ) { var left = Math.ceil(elem.getBoundingClientRect().left); return left",
"element's # // border. # try { # return $doc.getBoxObjectFor(elem).screenY",
"FF3 betas. The bug # // should be fixed before",
"whether the calculations are inside or outside of the element's",
"((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if",
"when overflow is not visible. We have to difference screenX",
"if(button == 2) { return 4; } else if (button",
"var doc = button.ownerDocument; if (doc != null) { var",
"return false; } if (!elem1.isSameNode) { return (elem1 == elem2);",
"$doc.documentElement.scrollLeft; } \"\"\") # This is what is in GWT",
"null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode) {",
"typeof elem.getBoundingClientRect == 'function' ) { var left = Math.ceil(elem.getBoundingClientRect().left);",
"if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null;",
"removed at a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # //",
"isOrHasChild(parent, child): JS(\"\"\" while (child) { if ((!parent.isSameNode)) { if",
"# } catch (e) { # // This works around",
"has 1px offset. if ( typeof elem.getBoundingClientRect == 'function' )",
"4 # if (e.code == 4) { # return 0;",
"\"\"\") def isOrHasChild(parent, child): JS(\"\"\" while (child) { if ((!parent.isSameNode))",
"while (parent) { if (parent.scrollLeft > 0) { left =",
"= $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while (parent) { if",
"expects getBoundingClientRect if ( typeof elem.getBoundingClientRect == 'function' ) {",
"Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older",
"e; # } #\"\"\" def getAbsoluteTop(elem): JS(\"\"\" // Firefox 3",
"|| !elem2) { return false; } if (!elem1.isSameNode) { return",
"use DOMImpl here because offsetLeft/Top return erroneous # // values",
"// Firefox 3 expects getBoundingClientRect // getBoundingClientRect can be float:",
"+ $doc.documentElement.scrollLeft; } // Older Firefox can use getBoxObjectFor else",
"current.isSameNode) { if (current == child) { return count; }",
"is not visible. We have to difference screenY # //",
"return false; \"\"\") def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem != null)",
"false, false, false, false, 0, null); button.dispatchEvent(evt); } \"\"\") def",
"+ $doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older Firefox can use",
"parent = elem.parentNode; while (parent) { if (parent.scrollTop > 0)",
"top -= parent.scrollTop; } parent = parent.parentNode; } return top",
"child) { return true; } } else if (parent.isSameNode(child)) {",
"} else if (!elem1 || !elem2) { return false; }",
"count; } if (current.nodeType == 1) { ++count; } current",
"releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem",
"if (!elem1 || !elem2) { return false; } if (!elem1.isSameNode)",
"# // on whether the calculations are inside or outside",
"if ( typeof elem.getBoundingClientRect == 'function' ) { var left",
"+ $doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent, child): JS(\"\"\" var count",
"(!elem.isSameNode) { if (elem == $wnd.__captureElem) { $wnd.__captureElem = null;",
"true; } } else if (parent.isSameNode(child)) { return true; }",
"#\"\"\" def getAbsoluteTop(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect if",
"{ return true; } } else if (parent.isSameNode(child)) { return",
"null; if (!elem.isSameNode) { if (elem == $wnd.__captureElem) { $wnd.__captureElem",
"} else if (current.isSameNode(child)) { return count; } if (current.nodeType",
"release, so this can # // be removed at a",
"0, 0, 0, 0, false, false, false, false, 0, null);",
"if ((!parent.isSameNode)) { if (parent == child) { return true;",
"use getBoxObjectFor else { var top = $doc.getBoxObjectFor(elem).y; var parent",
"true, null, 0, 0, 0, 0, 0, false, false, false,",
"bug in the FF3 betas. The bug # // should",
"# // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 # if",
"float: 73.1 instead of 74, see // gwt's workaround at",
"here due to a change in getBoxObjectFor which causes inconsistencies",
"!= null) { var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true,",
"} #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect",
"so this can # // be removed at a later",
"(current.isSameNode(child)) { return count; } if (current.nodeType == 1) {",
"if (parent.scrollLeft > 0) { left = left - parent.scrollLeft;",
"{ if (elem == $wnd.__captureElem) { $wnd.__captureElem = null; }",
"difference screenY # // here due to a change in",
"should be fixed before they release, so this can #",
"of 74, see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please",
"DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code == 4) { #",
"if (doc != null) { var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click',",
"e; # } #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3",
"3 expects getBoundingClientRect // getBoundingClientRect can be float: 73.1 instead",
"0, current = parent.firstChild; while (current) { if (! current.isSameNode)",
"# throw e; # } #\"\"\" def getAbsoluteTop(elem): JS(\"\"\" //",
"-= parent.scrollTop; } parent = parent.parentNode; } return top +",
"{ var left = $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while",
"if (button == 3) { return 2; } else {",
"be removed at a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 #",
"{ # return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch",
"# - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) { # //",
"top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent, child):",
"// getBoundingClientRect can be float: 73.1 instead of 74, see",
"(parent) { if (parent.scrollLeft > 0) { left = left",
"== 3) { return 2; } else { return button",
"GWT 1.5 for getAbsoluteLeft. err... #\"\"\" # // We cannot",
"JS(\"\"\" // Firefox 3 expects getBoundingClientRect // getBoundingClientRect can be",
"return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\") # This",
"on 'Permission denied to get property // HTMLDivElement.parentNode' // See",
"1.5 for getAbsoluteLeft. err... #\"\"\" # // We cannot use",
"parent.parentNode; } return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\")",
"try { child = child.parentNode; } catch(e) { // Give",
"= doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0, 0, 0, 0,",
"Firefox can use getBoxObjectFor else { var top = $doc.getBoxObjectFor(elem).y;",
"{ child = child.parentNode; } catch(e) { // Give up",
"= parent.firstChild; while (current) { if (! current.isSameNode) { if",
"else if (button == 3) { return 2; } else",
"cannot use DOMImpl here because offsetLeft/Top return erroneous # //",
"} \"\"\") def compare(elem1, elem2): JS(\"\"\" if (!elem1 && !elem2)",
"def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem))",
"null); button.dispatchEvent(evt); } \"\"\") def compare(elem1, elem2): JS(\"\"\" if (!elem1",
"values when overflow is not visible. We have to difference",
"parent.parentNode; } return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\")",
"top = Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; }",
"Firefox 3 expects getBoundingClientRect if ( typeof elem.getBoundingClientRect == 'function'",
"+ $doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent, child): JS(\"\"\"",
"{ child = null; } } return false; \"\"\") def",
"change in getBoxObjectFor which causes inconsistencies # // on whether",
"is in GWT 1.5 for getAbsoluteTop. err... #\"\"\" # //",
"$doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) { #",
"= Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } //",
"# try { # return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; #",
"compare(elem1, elem2): JS(\"\"\" if (!elem1 && !elem2) { return true;",
"(e) { # // This works around a bug in",
"elem2): JS(\"\"\" if (!elem1 && !elem2) { return true; }",
"# // border. # try { # return $doc.getBoxObjectFor(elem).screenY #",
"$doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while (parent) { if (parent.scrollLeft",
"return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent,",
"This works around a bug in the FF3 betas. The",
"{ var top = $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while",
"button = evt.which; if(button == 2) { return 4; }",
"not visible. We have to difference screenY # // here",
"0) { top -= parent.scrollTop; } parent = parent.parentNode; }",
"they release, so this can # // be removed at",
"date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 #",
"+ $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\") # This is what",
"} current = current.nextSibling; } return -1; \"\"\") def isOrHasChild(parent,",
"{ return 2; } else { return button || 0;",
"{ return count; } if (current.nodeType == 1) { ++count;",
"can use getBoxObjectFor else { var top = $doc.getBoxObjectFor(elem).y; var",
"return erroneous # // values when overflow is not visible.",
"because offsetLeft/Top return erroneous # // values when overflow is",
"0, 0, 0, false, false, false, false, 0, null); button.dispatchEvent(evt);",
"in GWT 1.5 for getAbsoluteLeft. err... #\"\"\" # // We",
"3 expects getBoundingClientRect if ( typeof elem.getBoundingClientRect == 'function' )",
"# return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e)",
"// Firefox 3 expects getBoundingClientRect if ( typeof elem.getBoundingClientRect ==",
"to difference screenX # // here due to a change",
"$wnd.__captureElem = null; } } else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem",
"(current) { if (! current.isSameNode) { if (current == child)",
"= null; } } else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem =",
"true; } try { child = child.parentNode; } catch(e) {",
"what is in GWT 1.5 for getAbsoluteTop. err... #\"\"\" #",
"JS(\"\"\" var count = 0, current = parent.firstChild; while (current)",
"} else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem = null; } \"\"\")",
"} else if (button == 3) { return 2; }",
"- $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) { # // This",
"( typeof elem.getBoundingClientRect == 'function' ) { var top =",
"{ if (parent.scrollLeft > 0) { left = left -",
"> 0) { top -= parent.scrollTop; } parent = parent.parentNode;",
"if (!elem1.isSameNode) { return (elem1 == elem2); } return (elem1.isSameNode(elem2));",
"buttonClick(button): JS(\"\"\" var doc = button.ownerDocument; if (doc != null)",
"&& (child.nodeType != 1)) { child = null; } }",
"visible. We have to difference screenY # // here due",
"We cannot use DOMImpl here because offsetLeft/Top return erroneous #",
"$doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\") # This is what is",
"((!parent.isSameNode)) { if (parent == child) { return true; }",
"\"\"\") def compare(elem1, elem2): JS(\"\"\" if (!elem1 && !elem2) {",
"implementation has 1px offset. if ( typeof elem.getBoundingClientRect == 'function'",
"# // DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code == 4)",
"calculations are inside or outside of the element's # //",
"{ return (elem1 == elem2); } return (elem1.isSameNode(elem2)); \"\"\") def",
"// This works around a bug in the FF3 betas.",
"getBoundingClientRect if ( typeof elem.getBoundingClientRect == 'function' ) { var",
"if (parent.scrollTop > 0) { top -= parent.scrollTop; } parent",
"left = Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; }",
"a change in getBoxObjectFor which causes inconsistencies # // on",
"false, false, false, 0, null); button.dispatchEvent(evt); } \"\"\") def compare(elem1,",
"{ top -= parent.scrollTop; } parent = parent.parentNode; } return",
"for getAbsoluteLeft. err... #\"\"\" # // We cannot use DOMImpl",
"(elem1 == elem2); } return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\"",
"(parent == child) { return true; } } else if",
"- $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) { # // This",
"screenY # // here due to a change in getBoxObjectFor",
"{ if (! current.isSameNode) { if (current == child) {",
"} if (child && (child.nodeType != 1)) { child =",
"getBoxObjectFor else { var top = $doc.getBoxObjectFor(elem).y; var parent =",
"workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation has 1px",
"0, 0, 0, 0, 0, false, false, false, false, 0,",
"is what is in GWT 1.5 for getAbsoluteLeft. err... #\"\"\"",
"typeof elem.getBoundingClientRect == 'function' ) { var top = Math.ceil(elem.getBoundingClientRect().top);",
"true; } else if (!elem1 || !elem2) { return false;",
"Older Firefox can use getBoxObjectFor else { var left =",
"return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\" var button = evt.which;",
"later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4",
"This is what is in GWT 1.5 for getAbsoluteTop. err...",
"def getAbsoluteTop(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect if (",
"property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if",
"return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older Firefox",
"catch (e) { # // This works around a bug",
"= null; if (!elem.isSameNode) { if (elem == $wnd.__captureElem) {",
"return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) {",
"} \"\"\") # This is what is in GWT 1.5",
"https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child && (child.nodeType != 1))",
"!elem2) { return false; } if (!elem1.isSameNode) { return (elem1",
"def getChildIndex(parent, child): JS(\"\"\" var count = 0, current =",
"$doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent, child): JS(\"\"\" var count =",
"{ var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0,",
"(!elem1 && !elem2) { return true; } else if (!elem1",
"if (current == child) { return count; } } else",
"on whether the calculations are inside or outside of the",
"button || 0; } \"\"\") # This is what is",
"74, see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note,",
"Give up on 'Permission denied to get property // HTMLDivElement.parentNode'",
"visible. We have to difference screenX # // here due",
"child.parentNode; } catch(e) { // Give up on 'Permission denied",
"// Older Firefox can use getBoxObjectFor else { var left",
"== elem2); } return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\" var",
"return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) {",
"if (! current.isSameNode) { if (current == child) { return",
"= $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while (parent) { if",
"# return 0; # } # throw e; # }",
"for getAbsoluteTop. err... #\"\"\" # // We cannot use DOMImpl",
"top = $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while (parent) {",
"\"\"\") def eventGetButton(evt): JS(\"\"\" var button = evt.which; if(button ==",
"0; } \"\"\") # This is what is in GWT",
"(child.nodeType != 1)) { child = null; } } return",
"or outside of the element's # // border. # try",
"} return false; \"\"\") def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem !=",
"if (!elem.isSameNode) { if (elem == $wnd.__captureElem) { $wnd.__captureElem =",
"if (!elem1 && !elem2) { return true; } else if",
"// be removed at a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111",
"a bug in the FF3 betas. The bug # //",
"if (parent.isSameNode(child)) { return true; } try { child =",
"} return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\") def",
"false; } if (!elem1.isSameNode) { return (elem1 == elem2); }",
"$doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) { # // This works",
"evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0, 0, 0,",
"elem2); } return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\" var button",
"# // values when overflow is not visible. We have",
"return true; } else if (!elem1 || !elem2) { return",
"return -1; \"\"\") def isOrHasChild(parent, child): JS(\"\"\" while (child) {",
"// Please note, their implementation has 1px offset. if (",
"+ $doc.documentElement.scrollLeft; } \"\"\") # This is what is in",
"false; } if (child && (child.nodeType != 1)) { child",
"of the element's # // border. # try { #",
"child) { return count; } } else if (current.isSameNode(child)) {",
"JS(\"\"\" // Firefox 3 expects getBoundingClientRect if ( typeof elem.getBoundingClientRect",
"\"\"\") def getChildIndex(parent, child): JS(\"\"\" var count = 0, current",
"else { var left = $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode;",
"== 4 # if (e.code == 4) { # return",
"} if (!elem1.isSameNode) { return (elem1 == elem2); } return",
"while (parent) { if (parent.scrollTop > 0) { top -=",
"current = current.nextSibling; } return -1; \"\"\") def isOrHasChild(parent, child):",
"getChildIndex(parent, child): JS(\"\"\" var count = 0, current = parent.firstChild;",
"overflow is not visible. We have to difference screenY #",
"# } # throw e; # } #\"\"\" def getAbsoluteTop(elem):",
"parent = parent.parentNode; } return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;",
"try { # return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # }",
"} } else if (parent.isSameNode(child)) { return true; } try",
"due to a change in getBoxObjectFor which causes inconsistencies #",
"(!elem1 || !elem2) { return false; } if (!elem1.isSameNode) {",
"are inside or outside of the element's # // border.",
"not visible. We have to difference screenX # // here",
"// See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child && (child.nodeType",
"# // be removed at a later date. # //",
"} if (current.nodeType == 1) { ++count; } current =",
"at a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR",
"Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older",
"$doc.body.scrollTop + $doc.documentElement.scrollTop; } \"\"\") def getChildIndex(parent, child): JS(\"\"\" var",
"This is what is in GWT 1.5 for getAbsoluteLeft. err...",
"before they release, so this can # // be removed",
"2) { return 4; } else if (button == 3)",
"= parent.parentNode; } return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; }",
"See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child && (child.nodeType !=",
"at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation has 1px offset.",
"throw e; # } #\"\"\" def getAbsoluteTop(elem): JS(\"\"\" // Firefox",
"# // here due to a change in getBoxObjectFor which",
"border. # try { # return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX;",
"elem.getBoundingClientRect == 'function' ) { var top = Math.ceil(elem.getBoundingClientRect().top); return",
"JS(\"\"\" var button = evt.which; if(button == 2) { return",
"Firefox can use getBoxObjectFor else { var left = $doc.getBoxObjectFor(elem).x;",
"# return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e)",
"elem.getBoundingClientRect == 'function' ) { var left = Math.ceil(elem.getBoundingClientRect().left); return",
"doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0, 0, 0, 0, 0,",
"} \"\"\") def getChildIndex(parent, child): JS(\"\"\" var count = 0,",
"def buttonClick(button): JS(\"\"\" var doc = button.ownerDocument; if (doc !=",
"} parent = parent.parentNode; } return left + $doc.body.scrollLeft +",
"} } else if (current.isSameNode(child)) { return count; } if",
"= left - parent.scrollLeft; } parent = parent.parentNode; } return",
"top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older Firefox can",
"# // We cannot use DOMImpl here because offsetLeft/Top return",
"== child) { return true; } } else if (parent.isSameNode(child))",
"# } #\"\"\" def getAbsoluteTop(elem): JS(\"\"\" // Firefox 3 expects",
"in getBoxObjectFor which causes inconsistencies # // on whether the",
"{ return 4; } else if (button == 3) {",
"DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode) { if (elem",
"Firefox 3 expects getBoundingClientRect // getBoundingClientRect can be float: 73.1",
"betas. The bug # // should be fixed before they",
"difference screenX # // here due to a change in",
"# if (e.code == 4) { # return 0; #",
"def compare(elem1, elem2): JS(\"\"\" if (!elem1 && !elem2) { return",
"} return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\") #",
"0, false, false, false, false, 0, null); button.dispatchEvent(evt); } \"\"\")",
"HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child &&",
"{ var top = Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop +",
"else if (!elem1 || !elem2) { return false; } if",
"return count; } if (current.nodeType == 1) { ++count; }",
"= current.nextSibling; } return -1; \"\"\") def isOrHasChild(parent, child): JS(\"\"\"",
"(child && (child.nodeType != 1)) { child = null; }",
"We have to difference screenY # // here due to",
"getBoundingClientRect can be float: 73.1 instead of 74, see //",
"a later date. # // https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR ==",
"have to difference screenY # // here due to a",
"if (e.code == 4) { # return 0; # }",
"3) { return 2; } else { return button ||",
"The bug # // should be fixed before they release,",
"// We cannot use DOMImpl here because offsetLeft/Top return erroneous",
"JS(\"\"\" while (child) { if ((!parent.isSameNode)) { if (parent ==",
") { var top = Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop",
"use getBoxObjectFor else { var left = $doc.getBoxObjectFor(elem).x; var parent",
"null; } } else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem = null;",
"screenX # // here due to a change in getBoxObjectFor",
"(elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\" var button = evt.which; if(button",
"== 1) { ++count; } current = current.nextSibling; } return",
"} } else if (elem.isSameNode($wnd.__captureElem)) { $wnd.__captureElem = null; }",
"if ( typeof elem.getBoundingClientRect == 'function' ) { var top",
"} else { return button || 0; } \"\"\") #",
"elem.parentNode; while (parent) { if (parent.scrollTop > 0) { top",
"can # // be removed at a later date. #",
"button.ownerDocument; if (doc != null) { var evt = doc.createEvent('MouseEvents');",
"count; } } else if (current.isSameNode(child)) { return count; }",
"if (current.nodeType == 1) { ++count; } current = current.nextSibling;",
"{ if (current == child) { return count; } }",
"(current.nodeType == 1) { ++count; } current = current.nextSibling; }",
"// gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation",
"$wnd.__captureElem) { $wnd.__captureElem = null; } } else if (elem.isSameNode($wnd.__captureElem))",
"} # throw e; # } #\"\"\" def getAbsoluteTop(elem): JS(\"\"\"",
"else { var top = $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode;",
"(!elem1.isSameNode) { return (elem1 == elem2); } return (elem1.isSameNode(elem2)); \"\"\")",
"'function' ) { var top = Math.ceil(elem.getBoundingClientRect().top); return top +",
"err... #\"\"\" # // We cannot use DOMImpl here because",
"var button = evt.which; if(button == 2) { return 4;",
"== child) { return count; } } else if (current.isSameNode(child))",
"return 4; } else if (button == 3) { return",
"return false; } if (child && (child.nodeType != 1)) {",
"catch(e) { // Give up on 'Permission denied to get",
"# // should be fixed before they release, so this",
"{ return false; } if (!elem1.isSameNode) { return (elem1 ==",
"parent = parent.parentNode; } return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;",
"(parent) { if (parent.scrollTop > 0) { top -= parent.scrollTop;",
"4) { # return 0; # } # throw e;",
"this can # // be removed at a later date.",
"null) { var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null,",
"4; } else if (button == 3) { return 2;",
"- parent.scrollLeft; } parent = parent.parentNode; } return left +",
"bug # // should be fixed before they release, so",
"var left = Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft;",
"null; } } return false; \"\"\") def releaseCapture(elem): JS(\"\"\" if",
"\"\"\") # This is what is in GWT 1.5 for",
"evt.which; if(button == 2) { return 4; } else if",
"the element's # // border. # try { # return",
"# throw e; # } #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" //",
"{ return true; } try { child = child.parentNode; }",
"&& !elem2) { return true; } else if (!elem1 ||",
"$doc.getBoxObjectFor($doc.documentElement).screenX; # } catch (e) { # // This works",
"while (child) { if ((!parent.isSameNode)) { if (parent == child)",
"$doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) { #",
"// https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code",
"} // Older Firefox can use getBoxObjectFor else { var",
"can use getBoxObjectFor else { var left = $doc.getBoxObjectFor(elem).x; var",
"== 'function' ) { var top = Math.ceil(elem.getBoundingClientRect().top); return top",
"{ var left = Math.ceil(elem.getBoundingClientRect().left); return left + $doc.body.scrollLeft +",
"DOM.sCaptureElem = null; if (!elem.isSameNode) { if (elem == $wnd.__captureElem)",
"getBoundingClientRect // getBoundingClientRect can be float: 73.1 instead of 74,",
"Please note, their implementation has 1px offset. if ( typeof",
"'function' ) { var left = Math.ceil(elem.getBoundingClientRect().left); return left +",
"&& DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode) { if",
"// border. # try { # return $doc.getBoxObjectFor(elem).screenX # -",
"element's # // border. # try { # return $doc.getBoxObjectFor(elem).screenX",
"++count; } current = current.nextSibling; } return -1; \"\"\") def",
"// HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; } if (child",
"+ $doc.documentElement.scrollTop; } // Older Firefox can use getBoxObjectFor else",
"// border. # try { # return $doc.getBoxObjectFor(elem).screenY # -",
"GWT 1.5 for getAbsoluteTop. err... #\"\"\" # // We cannot",
"false, 0, null); button.dispatchEvent(evt); } \"\"\") def compare(elem1, elem2): JS(\"\"\"",
"# // border. # try { # return $doc.getBoxObjectFor(elem).screenX #",
"1px offset. if ( typeof elem.getBoundingClientRect == 'function' ) {",
"{ return button || 0; } \"\"\") # This is",
"We have to difference screenX # // here due to",
"} catch (e) { # // This works around a",
"note, their implementation has 1px offset. if ( typeof elem.getBoundingClientRect",
"var count = 0, current = parent.firstChild; while (current) {",
"while (current) { if (! current.isSameNode) { if (current ==",
"is in GWT 1.5 for getAbsoluteLeft. err... #\"\"\" # //",
"border. # try { # return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY;",
"var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true, true, null, 0, 0,",
"evt.initMouseEvent('click', true, true, null, 0, 0, 0, 0, 0, false,",
"\"\"\") def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem != null) && DOM.compare(elem,",
"elem.parentNode; while (parent) { if (parent.scrollLeft > 0) { left",
"{ # return $doc.getBoxObjectFor(elem).screenX # - $doc.getBoxObjectFor($doc.documentElement).screenX; # } catch",
"# - $doc.getBoxObjectFor($doc.documentElement).screenY; # } catch (e) { # //",
"# // This works around a bug in the FF3",
"(button == 3) { return 2; } else { return",
"getAbsoluteTop. err... #\"\"\" # // We cannot use DOMImpl here",
"(doc != null) { var evt = doc.createEvent('MouseEvents'); evt.initMouseEvent('click', true,",
"= null; } } return false; \"\"\") def releaseCapture(elem): JS(\"\"\"",
"// should be fixed before they release, so this can",
"child): JS(\"\"\" while (child) { if ((!parent.isSameNode)) { if (parent",
"see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their",
"} return -1; \"\"\") def isOrHasChild(parent, child): JS(\"\"\" while (child)",
"button.dispatchEvent(evt); } \"\"\") def compare(elem1, elem2): JS(\"\"\" if (!elem1 &&",
"// Give up on 'Permission denied to get property //",
"// DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code == 4) {",
"false, false, 0, null); button.dispatchEvent(evt); } \"\"\") def compare(elem1, elem2):",
"} else if (parent.isSameNode(child)) { return true; } try {",
"if (elem == $wnd.__captureElem) { $wnd.__captureElem = null; } }",
"child): JS(\"\"\" var count = 0, current = parent.firstChild; while",
"get property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false; }",
"getBoxObjectFor else { var left = $doc.getBoxObjectFor(elem).x; var parent =",
"return 2; } else { return button || 0; }",
"$doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older Firefox can use getBoxObjectFor",
"(current == child) { return count; } } else if",
"2; } else { return button || 0; } \"\"\")",
"to difference screenY # // here due to a change",
"getAbsoluteTop(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect if ( typeof",
"{ if (parent == child) { return true; } }",
"is what is in GWT 1.5 for getAbsoluteTop. err... #\"\"\"",
"} # throw e; # } #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\"",
"https://bugzilla.mozilla.org/show_bug.cgi?id=409111 # // DOMException.WRONG_DOCUMENT_ERR == 4 # if (e.code ==",
"# This is what is in GWT 1.5 for getAbsoluteTop.",
"# try { # return $doc.getBoxObjectFor(elem).screenY # - $doc.getBoxObjectFor($doc.documentElement).screenY; #",
"(parent.isSameNode(child)) { return true; } try { child = child.parentNode;",
"Older Firefox can use getBoxObjectFor else { var top =",
"-1; \"\"\") def isOrHasChild(parent, child): JS(\"\"\" while (child) { if",
"73.1 instead of 74, see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47",
"#\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect //",
"{ left = left - parent.scrollLeft; } parent = parent.parentNode;",
"gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation has",
"else if (parent.isSameNode(child)) { return true; } try { child",
"|| 0; } \"\"\") # This is what is in",
"0, 0, false, false, false, false, 0, null); button.dispatchEvent(evt); }",
"(child) { if ((!parent.isSameNode)) { if (parent == child) {",
"{ return count; } } else if (current.isSameNode(child)) { return",
"else { return button || 0; } \"\"\") # This",
"'Permission denied to get property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427",
"child = null; } } return false; \"\"\") def releaseCapture(elem):",
"#\"\"\" # // We cannot use DOMImpl here because offsetLeft/Top",
"var top = Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop + $doc.documentElement.scrollTop;",
"expects getBoundingClientRect // getBoundingClientRect can be float: 73.1 instead of",
"user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 // Please note, their implementation has 1px offset. if",
"var left = $doc.getBoxObjectFor(elem).x; var parent = elem.parentNode; while (parent)",
"up on 'Permission denied to get property // HTMLDivElement.parentNode' //",
"!= 1)) { child = null; } } return false;",
"# } #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3 expects",
"return (elem1 == elem2); } return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt):",
"return true; } } else if (parent.isSameNode(child)) { return true;",
"the calculations are inside or outside of the element's #",
"$doc.documentElement.scrollTop; } // Older Firefox can use getBoxObjectFor else {",
"return button || 0; } \"\"\") # This is what",
"getBoxObjectFor which causes inconsistencies # // on whether the calculations",
"parent.firstChild; while (current) { if (! current.isSameNode) { if (current",
"def isOrHasChild(parent, child): JS(\"\"\" while (child) { if ((!parent.isSameNode)) {",
"offsetLeft/Top return erroneous # // values when overflow is not",
"// here due to a change in getBoxObjectFor which causes",
"// Older Firefox can use getBoxObjectFor else { var top",
"(parent.scrollTop > 0) { top -= parent.scrollTop; } parent =",
"return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older Firefox",
"1) { ++count; } current = current.nextSibling; } return -1;",
"= Math.ceil(elem.getBoundingClientRect().top); return top + $doc.body.scrollTop + $doc.documentElement.scrollTop; } //",
"!elem2) { return true; } else if (!elem1 || !elem2)",
"which causes inconsistencies # // on whether the calculations are",
"null, 0, 0, 0, 0, 0, false, false, false, false,",
"return 0; # } # throw e; # } #\"\"\"",
"{ $wnd.__captureElem = null; } } else if (elem.isSameNode($wnd.__captureElem)) {",
"count = 0, current = parent.firstChild; while (current) { if",
"} parent = parent.parentNode; } return top + $doc.body.scrollTop +",
"0) { left = left - parent.scrollLeft; } parent =",
"if (current.isSameNode(child)) { return count; } if (current.nodeType == 1)",
"to get property // HTMLDivElement.parentNode' // See https://bugzilla.mozilla.org/show_bug.cgi?id=208427 return false;",
"left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } \"\"\") # This is",
"# This is what is in GWT 1.5 for getAbsoluteLeft.",
"parent.scrollTop; } parent = parent.parentNode; } return top + $doc.body.scrollTop",
"be fixed before they release, so this can # //",
"outside of the element's # // border. # try {",
"instead of 74, see // gwt's workaround at user/src/com/google/gwt/dom/client/DOMImplMozilla.java:47 //",
"(parent.scrollLeft > 0) { left = left - parent.scrollLeft; }",
"> 0) { left = left - parent.scrollLeft; } parent",
"== 4) { # return 0; # } # throw",
"= parent.parentNode; } return left + $doc.body.scrollLeft + $doc.documentElement.scrollLeft; }",
"// on whether the calculations are inside or outside of",
"causes inconsistencies # // on whether the calculations are inside",
"var top = $doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while (parent)",
"return true; } try { child = child.parentNode; } catch(e)",
"{ return true; } else if (!elem1 || !elem2) {",
"the FF3 betas. The bug # // should be fixed",
"= child.parentNode; } catch(e) { // Give up on 'Permission",
"doc = button.ownerDocument; if (doc != null) { var evt",
"getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect // getBoundingClientRect can",
"= evt.which; if(button == 2) { return 4; } else",
"DOMImpl here because offsetLeft/Top return erroneous # // values when",
"throw e; # } #\"\"\" def getAbsoluteLeft(elem): JS(\"\"\" // Firefox",
"0, null); button.dispatchEvent(evt); } \"\"\") def compare(elem1, elem2): JS(\"\"\" if",
"(! current.isSameNode) { if (current == child) { return count;",
"if (child && (child.nodeType != 1)) { child = null;",
"JS(\"\"\" if ((DOM.sCaptureElem != null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem =",
"{ if ((!parent.isSameNode)) { if (parent == child) { return",
"when overflow is not visible. We have to difference screenY",
"parent = elem.parentNode; while (parent) { if (parent.scrollLeft > 0)",
"JS(\"\"\" var doc = button.ownerDocument; if (doc != null) {",
"current.nextSibling; } return -1; \"\"\") def isOrHasChild(parent, child): JS(\"\"\" while",
"} } return false; \"\"\") def releaseCapture(elem): JS(\"\"\" if ((DOM.sCaptureElem",
"here because offsetLeft/Top return erroneous # // values when overflow",
"+ $doc.body.scrollLeft + $doc.documentElement.scrollLeft; } // Older Firefox can use",
"have to difference screenX # // here due to a",
"$doc.getBoxObjectFor(elem).y; var parent = elem.parentNode; while (parent) { if (parent.scrollTop",
"can be float: 73.1 instead of 74, see // gwt's",
"!= null) && DOM.compare(elem, DOM.sCaptureElem)) DOM.sCaptureElem = null; if (!elem.isSameNode)",
"around a bug in the FF3 betas. The bug #",
"return count; } } else if (current.isSameNode(child)) { return count;",
"var parent = elem.parentNode; while (parent) { if (parent.scrollLeft >",
"JS(\"\"\" if (!elem1 && !elem2) { return true; } else",
"$doc.body.scrollTop + $doc.documentElement.scrollTop; } // Older Firefox can use getBoxObjectFor",
"{ # return 0; # } # throw e; #",
"parent.scrollLeft; } parent = parent.parentNode; } return left + $doc.body.scrollLeft",
"0; # } # throw e; # } #\"\"\" def",
"eventGetButton(evt): JS(\"\"\" var button = evt.which; if(button == 2) {",
"(elem == $wnd.__captureElem) { $wnd.__captureElem = null; } } else",
"to a change in getBoxObjectFor which causes inconsistencies # //",
"their implementation has 1px offset. if ( typeof elem.getBoundingClientRect ==",
"} return (elem1.isSameNode(elem2)); \"\"\") def eventGetButton(evt): JS(\"\"\" var button =",
"in the FF3 betas. The bug # // should be",
"left = left - parent.scrollLeft; } parent = parent.parentNode; }",
"child = child.parentNode; } catch(e) { // Give up on",
"true, true, null, 0, 0, 0, 0, 0, false, false,",
"def getAbsoluteLeft(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect // getBoundingClientRect",
"be float: 73.1 instead of 74, see // gwt's workaround",
"current = parent.firstChild; while (current) { if (! current.isSameNode) {",
"overflow is not visible. We have to difference screenX #",
"{ ++count; } current = current.nextSibling; } return -1; \"\"\")",
"var parent = elem.parentNode; while (parent) { if (parent.scrollTop >",
"{ // Give up on 'Permission denied to get property",
"inside or outside of the element's # // border. #",
"= 0, current = parent.firstChild; while (current) { if (!",
"== $wnd.__captureElem) { $wnd.__captureElem = null; } } else if",
"is not visible. We have to difference screenX # //",
"def eventGetButton(evt): JS(\"\"\" var button = evt.which; if(button == 2)",
"offset. if ( typeof elem.getBoundingClientRect == 'function' ) { var",
"} catch(e) { // Give up on 'Permission denied to",
"<gh_stars>0 def buttonClick(button): JS(\"\"\" var doc = button.ownerDocument; if (doc",
"# } # throw e; # } #\"\"\" def getAbsoluteLeft(elem):",
"} #\"\"\" def getAbsoluteTop(elem): JS(\"\"\" // Firefox 3 expects getBoundingClientRect",
"getAbsoluteLeft. err... #\"\"\" # // We cannot use DOMImpl here"
] |
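The eventGetButton wrapper above maps Mozilla's evt.which values onto GWT-style button codes (middle becomes 4, right becomes 2, left stays 1). As a minimal sketch, the same mapping is expressed below in plain Python purely for illustration; the function name and its use outside the JS() wrapper are assumptions, not part of the original module.

# Hypothetical pure-Python mirror of the eventGetButton mapping above,
# shown only to illustrate the evt.which -> GWT button-code translation.
def event_get_button(which):
    if which == 2:        # middle button in Mozilla's evt.which
        return 4          # GWT button code for "middle"
    elif which == 3:      # right button
        return 2          # GWT button code for "right"
    return which or 0     # left button (1) or unknown (0)

assert event_get_button(2) == 4
assert event_get_button(3) == 2
assert event_get_button(1) == 1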
[
"by Django 1.9.6 on 2016-06-10 21:25 from __future__ import unicode_literals",
"# -*- coding: utf-8 -*- # Generated by Django 1.9.6",
"Generated by Django 1.9.6 on 2016-06-10 21:25 from __future__ import",
"('vendors', '0089_auto_20160602_2123'), ] operations = [ migrations.AlterField( model_name='vendor', name='email', field=models.EmailField(blank=True,",
"21:25 from __future__ import unicode_literals from django.db import migrations, models",
"-*- coding: utf-8 -*- # Generated by Django 1.9.6 on",
"dependencies = [ ('vendors', '0089_auto_20160602_2123'), ] operations = [ migrations.AlterField(",
"'0089_auto_20160602_2123'), ] operations = [ migrations.AlterField( model_name='vendor', name='email', field=models.EmailField(blank=True, max_length=254,",
"2016-06-10 21:25 from __future__ import unicode_literals from django.db import migrations,",
"unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"migrations, models class Migration(migrations.Migration): dependencies = [ ('vendors', '0089_auto_20160602_2123'), ]",
"= [ ('vendors', '0089_auto_20160602_2123'), ] operations = [ migrations.AlterField( model_name='vendor',",
"operations = [ migrations.AlterField( model_name='vendor', name='email', field=models.EmailField(blank=True, max_length=254, verbose_name='Email'), ),",
"Migration(migrations.Migration): dependencies = [ ('vendors', '0089_auto_20160602_2123'), ] operations = [",
"] operations = [ migrations.AlterField( model_name='vendor', name='email', field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),",
"[ ('vendors', '0089_auto_20160602_2123'), ] operations = [ migrations.AlterField( model_name='vendor', name='email',",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [",
"# Generated by Django 1.9.6 on 2016-06-10 21:25 from __future__",
"= [ migrations.AlterField( model_name='vendor', name='email', field=models.EmailField(blank=True, max_length=254, verbose_name='Email'), ), ]",
"class Migration(migrations.Migration): dependencies = [ ('vendors', '0089_auto_20160602_2123'), ] operations =",
"1.9.6 on 2016-06-10 21:25 from __future__ import unicode_literals from django.db",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('vendors',",
"import migrations, models class Migration(migrations.Migration): dependencies = [ ('vendors', '0089_auto_20160602_2123'),",
"Django 1.9.6 on 2016-06-10 21:25 from __future__ import unicode_literals from",
"import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"on 2016-06-10 21:25 from __future__ import unicode_literals from django.db import",
"__future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration):",
"utf-8 -*- # Generated by Django 1.9.6 on 2016-06-10 21:25",
"from __future__ import unicode_literals from django.db import migrations, models class",
"coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-06-10",
"-*- # Generated by Django 1.9.6 on 2016-06-10 21:25 from",
"models class Migration(migrations.Migration): dependencies = [ ('vendors', '0089_auto_20160602_2123'), ] operations"
] |
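The migration above alters a single column: it redeclares the vendor email field as an optional EmailField. A minimal sketch of the model declaration this AlterField corresponds to might look like the following; the module path and model body are assumptions for illustration, not taken from the original project.

# Hypothetical vendors/models.py excerpt matching the AlterField above.
from django.db import models

class Vendor(models.Model):
    # blank=True makes the field optional in forms; max_length=254 is
    # Django's default for EmailField.
    email = models.EmailField(blank=True, max_length=254, verbose_name='Email')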
[
"n): if any(order[x] <= lower[y] for y in dfs.children[x]): yield",
"in dfs.preorder[::-1]: for y in graph[x]: if y == dfs.parent[x]:",
"== dfs.parent[x]: continue lower[x] = min(lower[x], lower[y]) if len(dfs.children[0]) >",
"DFS def articulation_points(graph): n = len(graph) dfs = DFS(graph) order",
"depth_first_search import DFS def articulation_points(graph): n = len(graph) dfs =",
"= DFS(graph) order = [None] * n for i, x",
"n = len(graph) dfs = DFS(graph) order = [None] *",
"dfs.preorder[::-1]: for y in graph[x]: if y == dfs.parent[x]: continue",
"yield 0 for x in range(1, n): if any(order[x] <=",
"in range(1, n): if any(order[x] <= lower[y] for y in",
"i lower = order[:] for x in dfs.preorder[::-1]: for y",
"x in dfs.preorder[::-1]: for y in graph[x]: if y ==",
"dfs.parent[x]: continue lower[x] = min(lower[x], lower[y]) if len(dfs.children[0]) > 1:",
"y in graph[x]: if y == dfs.parent[x]: continue lower[x] =",
"order[:] for x in dfs.preorder[::-1]: for y in graph[x]: if",
"= min(lower[x], lower[y]) if len(dfs.children[0]) > 1: yield 0 for",
"y == dfs.parent[x]: continue lower[x] = min(lower[x], lower[y]) if len(dfs.children[0])",
"len(graph) dfs = DFS(graph) order = [None] * n for",
"> 1: yield 0 for x in range(1, n): if",
"if any(order[x] <= lower[y] for y in dfs.children[x]): yield x",
"if y == dfs.parent[x]: continue lower[x] = min(lower[x], lower[y]) if",
"[None] * n for i, x in enumerate(dfs.preorder): order[x] =",
"lower[x] = min(lower[x], lower[y]) if len(dfs.children[0]) > 1: yield 0",
"= [None] * n for i, x in enumerate(dfs.preorder): order[x]",
"= len(graph) dfs = DFS(graph) order = [None] * n",
"i, x in enumerate(dfs.preorder): order[x] = i lower = order[:]",
"* n for i, x in enumerate(dfs.preorder): order[x] = i",
"= i lower = order[:] for x in dfs.preorder[::-1]: for",
"lower = order[:] for x in dfs.preorder[::-1]: for y in",
"enumerate(dfs.preorder): order[x] = i lower = order[:] for x in",
"DFS(graph) order = [None] * n for i, x in",
"for x in dfs.preorder[::-1]: for y in graph[x]: if y",
"min(lower[x], lower[y]) if len(dfs.children[0]) > 1: yield 0 for x",
"1: yield 0 for x in range(1, n): if any(order[x]",
"def articulation_points(graph): n = len(graph) dfs = DFS(graph) order =",
"for i, x in enumerate(dfs.preorder): order[x] = i lower =",
"in enumerate(dfs.preorder): order[x] = i lower = order[:] for x",
"lower[y]) if len(dfs.children[0]) > 1: yield 0 for x in",
"for y in graph[x]: if y == dfs.parent[x]: continue lower[x]",
"= order[:] for x in dfs.preorder[::-1]: for y in graph[x]:",
"dfs = DFS(graph) order = [None] * n for i,",
"from depth_first_search import DFS def articulation_points(graph): n = len(graph) dfs",
"len(dfs.children[0]) > 1: yield 0 for x in range(1, n):",
"articulation_points(graph): n = len(graph) dfs = DFS(graph) order = [None]",
"order[x] = i lower = order[:] for x in dfs.preorder[::-1]:",
"x in range(1, n): if any(order[x] <= lower[y] for y",
"<gh_stars>0 from depth_first_search import DFS def articulation_points(graph): n = len(graph)",
"in graph[x]: if y == dfs.parent[x]: continue lower[x] = min(lower[x],",
"continue lower[x] = min(lower[x], lower[y]) if len(dfs.children[0]) > 1: yield",
"range(1, n): if any(order[x] <= lower[y] for y in dfs.children[x]):",
"if len(dfs.children[0]) > 1: yield 0 for x in range(1,",
"import DFS def articulation_points(graph): n = len(graph) dfs = DFS(graph)",
"n for i, x in enumerate(dfs.preorder): order[x] = i lower",
"order = [None] * n for i, x in enumerate(dfs.preorder):",
"graph[x]: if y == dfs.parent[x]: continue lower[x] = min(lower[x], lower[y])",
"for x in range(1, n): if any(order[x] <= lower[y] for",
"x in enumerate(dfs.preorder): order[x] = i lower = order[:] for",
"0 for x in range(1, n): if any(order[x] <= lower[y]"
] |
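A small usage sketch, assuming the depth_first_search.DFS helper imported above is available and exposes preorder, parent, and children the way the generator expects: for a triangle 0-1-2 with an extra vertex 3 hanging off vertex 2, the only articulation point is vertex 2.

# Hedged example; the expected output relies on the assumed DFS helper.
graph = [
    [1, 2],     # 0 is connected to 1 and 2
    [0, 2],     # 1 is connected to 0 and 2
    [0, 1, 3],  # 2 is connected to 0, 1 and 3
    [2],        # 3 hangs off 2, so removing 2 disconnects it
]
print(list(articulation_points(graph)))  # expected: [2]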
[
"_instancia = None def __new__(cls, *args, **kwargs): if not(cls._instancia): cls._instancia",
"novos def grava_tweet(self, tweet_info): #grava o retorno dos tweets self.collection_tweets.insert_one(tweet_info)",
"assert (string_conexao != \"\"), \"String de conexao indefinida\" try: self.mongo_client",
"foi possivel se conectar ao B.D.\") print(\"Conectado a\", string_conexao) def",
"conexao com o MongoDB \"\"\" _instancia = None def __new__(cls,",
"cls).__new__(cls, *args, **kwargs) return cls._instancia def __init__(self,): #pega a string",
"grava e adiciona a lista de novos filmes novos =",
"Configuracoes class Mongo_Database: \"\"\" Singleton com a conexao com o",
"not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return cls._instancia def",
"self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except:",
"def __init__(self,): #pega a string de conexao no arquivo de",
"e adiciona a lista de novos filmes novos = []",
"= super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return cls._instancia def __init__(self,): #pega",
"configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao != \"\"), \"String",
"self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi possivel se conectar ao B.D.\")",
"return novos def grava_tweet(self, tweet_info): #grava o retorno dos tweets",
"cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return cls._instancia def __init__(self,):",
"se o filme ja existe #se nao existir, grava e",
"grava_filmes(self, lista_filmes): #verifica se o filme ja existe #se nao",
"import pymongo from conf import Configuracoes class Mongo_Database: \"\"\" Singleton",
"__init__(self,): #pega a string de conexao no arquivo de configuracao",
"adiciona a lista de novos filmes novos = [] try:",
"string de conexao no arquivo de configuracao string_conexao = Configuracoes().get_config(\"database\",",
"a lista de novos filmes novos = [] try: for",
"a\", string_conexao) def grava_filmes(self, lista_filmes): #verifica se o filme ja",
"*args, **kwargs): if not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs)",
"lista_filmes): #verifica se o filme ja existe #se nao existir,",
"o MongoDB \"\"\" _instancia = None def __new__(cls, *args, **kwargs):",
"novos filmes novos = [] try: for filme in lista_filmes:",
"def __new__(cls, *args, **kwargs): if not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls,",
"indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets =",
"nao existir, grava e adiciona a lista de novos filmes",
"Singleton com a conexao com o MongoDB \"\"\" _instancia =",
"Exception(\"Nao foi possivel se conectar ao B.D.\") print(\"Conectado a\", string_conexao)",
"com a conexao com o MongoDB \"\"\" _instancia = None",
"conexao indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets",
"*args, **kwargs) return cls._instancia def __init__(self,): #pega a string de",
"**kwargs) return cls._instancia def __init__(self,): #pega a string de conexao",
"\"String de conexao indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes =",
"import Configuracoes class Mongo_Database: \"\"\" Singleton com a conexao com",
"0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos def grava_tweet(self, tweet_info): #grava",
"novos.append(filme) finally: return novos def grava_tweet(self, tweet_info): #grava o retorno",
"cls._instancia def __init__(self,): #pega a string de conexao no arquivo",
"ao B.D.\") print(\"Conectado a\", string_conexao) def grava_filmes(self, lista_filmes): #verifica se",
"conectar ao B.D.\") print(\"Conectado a\", string_conexao) def grava_filmes(self, lista_filmes): #verifica",
"no arquivo de configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao",
"Mongo_Database: \"\"\" Singleton com a conexao com o MongoDB \"\"\"",
"in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally:",
"self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi possivel se conectar",
"class Mongo_Database: \"\"\" Singleton com a conexao com o MongoDB",
"filme ja existe #se nao existir, grava e adiciona a",
"#se nao existir, grava e adiciona a lista de novos",
"super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return cls._instancia def __init__(self,): #pega a",
"filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme)",
"a string de conexao no arquivo de configuracao string_conexao =",
"Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao != \"\"), \"String de conexao indefinida\"",
"arquivo de configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao !=",
"de configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao != \"\"),",
"\"\"\" _instancia = None def __new__(cls, *args, **kwargs): if not(cls._instancia):",
"None def __new__(cls, *args, **kwargs): if not(cls._instancia): cls._instancia = super(Mongo_Database,",
"pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao",
"print(\"Conectado a\", string_conexao) def grava_filmes(self, lista_filmes): #verifica se o filme",
"\"\"), \"String de conexao indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes",
"lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return",
"= None def __new__(cls, *args, **kwargs): if not(cls._instancia): cls._instancia =",
"com o MongoDB \"\"\" _instancia = None def __new__(cls, *args,",
"= self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi possivel se conectar ao",
"for filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme)",
"filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos def grava_tweet(self,",
"string_conexao) def grava_filmes(self, lista_filmes): #verifica se o filme ja existe",
"**kwargs): if not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return",
"de conexao indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"]",
"= [] try: for filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]})",
"possivel se conectar ao B.D.\") print(\"Conectado a\", string_conexao) def grava_filmes(self,",
"o filme ja existe #se nao existir, grava e adiciona",
"novos = [] try: for filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\":",
"\"\"\" Singleton com a conexao com o MongoDB \"\"\" _instancia",
"if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos",
"finally: return novos def grava_tweet(self, tweet_info): #grava o retorno dos",
"__new__(cls, *args, **kwargs): if not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls, *args,",
"self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi",
"existir, grava e adiciona a lista de novos filmes novos",
"\"string_connection\") assert (string_conexao != \"\"), \"String de conexao indefinida\" try:",
"de conexao no arquivo de configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\")",
"a conexao com o MongoDB \"\"\" _instancia = None def",
"MongoDB \"\"\" _instancia = None def __new__(cls, *args, **kwargs): if",
"(string_conexao != \"\"), \"String de conexao indefinida\" try: self.mongo_client =",
"if not(cls._instancia): cls._instancia = super(Mongo_Database, cls).__new__(cls, *args, **kwargs) return cls._instancia",
"B.D.\") print(\"Conectado a\", string_conexao) def grava_filmes(self, lista_filmes): #verifica se o",
"conexao no arquivo de configuracao string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert",
"== 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos def grava_tweet(self, tweet_info):",
"existe #se nao existir, grava e adiciona a lista de",
"(self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0): self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos def",
"pymongo from conf import Configuracoes class Mongo_Database: \"\"\" Singleton com",
"try: self.mongo_client = pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"]",
"lista de novos filmes novos = [] try: for filme",
"= pymongo.MongoClient(string_conexao) self.collection_filmes = self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise",
"ja existe #se nao existir, grava e adiciona a lista",
"filmes novos = [] try: for filme in lista_filmes: if",
"from conf import Configuracoes class Mongo_Database: \"\"\" Singleton com a",
"!= \"\"), \"String de conexao indefinida\" try: self.mongo_client = pymongo.MongoClient(string_conexao)",
"= self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi possivel",
"def grava_filmes(self, lista_filmes): #verifica se o filme ja existe #se",
"self.collection_filmes.insert_one(filme) novos.append(filme) finally: return novos def grava_tweet(self, tweet_info): #grava o",
"return cls._instancia def __init__(self,): #pega a string de conexao no",
"[] try: for filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) ==",
"except: raise Exception(\"Nao foi possivel se conectar ao B.D.\") print(\"Conectado",
"string_conexao = Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao != \"\"), \"String de",
"de novos filmes novos = [] try: for filme in",
"conf import Configuracoes class Mongo_Database: \"\"\" Singleton com a conexao",
"self.mongo_client[\"popcorn_time\"][\"filmes\"] self.collection_tweets = self.mongo_client[\"twitter_log\"][\"tweets\"] except: raise Exception(\"Nao foi possivel se",
"#pega a string de conexao no arquivo de configuracao string_conexao",
"= Configuracoes().get_config(\"database\", \"string_connection\") assert (string_conexao != \"\"), \"String de conexao",
"#verifica se o filme ja existe #se nao existir, grava",
"try: for filme in lista_filmes: if (self.collection_filmes.count_documents({\"_id\": filme[\"_id\"]}) == 0):",
"se conectar ao B.D.\") print(\"Conectado a\", string_conexao) def grava_filmes(self, lista_filmes):",
"raise Exception(\"Nao foi possivel se conectar ao B.D.\") print(\"Conectado a\","
] |
[
"def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self):",
"= 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime +",
"except Exception as e: Exceptions.error(Exception('Deepsleep not available: ' + str(e)))",
"None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin): # P2, P3, P4,",
"@property def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property def pinWake(self):",
"def isSleepWake(self): return self.pinWake or self.RTCWake or self.ULPWake @property def",
"P2, P3, P4, P6, P8 to P10 and P13 to",
"wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE",
"@property def pinWake(self): return self.wakeReason == machine.PIN_WAKE @property def RTCWake(self):",
"available: ' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0)",
"= 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self):",
"return self.wakeReason == machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason ==",
"return self.wakeReason == machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason ==",
"return self.pinWake or self.RTCWake or self.ULPWake @property def activeTime(self): return",
"pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if milliseconds ==",
"def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if",
"return machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property",
"class Sleep: @property def wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self):",
"@property def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def",
"self.wakeReason == machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason == machine.RTC_WAKE",
"if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY)",
"def pinWake(self): return self.wakeReason == machine.PIN_WAKE @property def RTCWake(self): return",
"== machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property",
"isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except",
"Exceptions class Sleep: @property def wakeReason(self): return machine.wake_reason()[0] @property def",
"if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True)",
"def ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self): return",
"+ utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY =",
"ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake",
"604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(),",
"key, value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def addWakeUpPin(self,",
"0) def sleep(self, milliseconds=0): if milliseconds == 0: milliseconds =",
"P6, P8 to P10 and P13 to P23 if isinstance(pin,",
"self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self, key, value=0):",
"self.wakeReason == machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason == machine.ULP_WAKE",
"value) def addWakeUpPin(self, pin): # P2, P3, P4, P6, P8",
"def wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason ==",
"pin): # P2, P3, P4, P6, P8 to P10 and",
"return machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self):",
"' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def",
"utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime'",
"sleep(self, milliseconds=0): if milliseconds == 0: milliseconds = 604800000 #",
"except Exception as e: Exceptions.error(Exception('Sleep not available: ' + str(e)))",
"e: Exceptions.error(Exception('Sleep not available: ' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY,",
"1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try:",
"utime from exceptions import Exceptions class Sleep: @property def wakeReason(self):",
"P13 to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try:",
"resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if milliseconds",
"if milliseconds == 0: milliseconds = 604800000 # 1 week",
"self.RTCWake or self.ULPWake @property def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(),",
"= 'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if",
"pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if milliseconds == 0: milliseconds",
"self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime)",
"mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep not available: '",
"self.ULPWake @property def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property",
"return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime",
"str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0):",
"pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY)",
"def RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property def ULPWake(self): return",
"= 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms()",
"'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart",
"value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin):",
"P4, P6, P8 to P10 and P13 to P23 if",
"machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self): return",
"self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake or self.RTCWake",
"self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep not available:",
"utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep not",
"self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime =",
"import pycom import utime from exceptions import Exceptions class Sleep:",
"sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = []",
"self.wakeReason == machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason == machine.PIN_WAKE",
"pinWake(self): return self.wakeReason == machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason",
"machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep not available: ' +",
"pycom import utime from exceptions import Exceptions class Sleep: @property",
"return self.wakeReason == machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason ==",
"self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time()",
"machine.PIN_WAKE @property def RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property def",
"wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1] @property def",
"@property def wakePins(self): return machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason",
"self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as e:",
"RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason",
"+ str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self,",
"exceptions import Exceptions class Sleep: @property def wakeReason(self): return machine.wake_reason()[0]",
"self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY",
"not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) +",
"pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except",
"as e: Exceptions.error(Exception('Deepsleep not available: ' + str(e))) def delay(self,",
"Sleep: @property def wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self): return",
"import machine import pycom import utime from exceptions import Exceptions",
"self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY,",
"= pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self,",
"'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY)",
"pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key)",
"<gh_stars>0 import machine import pycom import utime from exceptions import",
"def addWakeUpPin(self, pin): # P2, P3, P4, P6, P8 to",
"(pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin): # P2,",
"# P2, P3, P4, P6, P8 to P10 and P13",
"Exceptions.error(Exception('Sleep not available: ' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0)",
"pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self, key,",
"else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e:",
"def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property def pinWake(self): return",
"or self.RTCWake or self.ULPWake @property def activeTime(self): return self.__activeTime +",
"== None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin): # P2, P3,",
"return self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake or",
"list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception",
"to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins,",
"machine.wake_reason()[1] @property def powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property def",
"self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime'",
"= utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY)",
"machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime =",
"= [] def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) == None):",
"self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def",
"P10 and P13 to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else:",
"+ utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep",
"enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep not available: ' +",
"if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def addWakeUpPin(self, pin): #",
"machine import pycom import utime from exceptions import Exceptions class",
"0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, 0) def sleep(self, milliseconds=0): if milliseconds == 0:",
"activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart) @property def inactiveTime(self): return",
"@property def wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1]",
"pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins",
"= pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins = [] def __initPersistentVariable(self, key, value=0): if",
"def wakeReason(self): return machine.wake_reason()[0] @property def wakePins(self): return machine.wake_reason()[1] @property",
"+ sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) self.__wakeUpPins =",
"not available: ' + str(e))) def resetTimers(self): pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, 0) pycom.nvs_set(Sleep.INACTIVE_TIME_KEY,",
"self.pinWake or self.RTCWake or self.ULPWake @property def activeTime(self): return self.__activeTime",
"pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime = pycom.nvs_get(Sleep.INACTIVE_TIME_KEY)",
"milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception",
"== machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason == machine.PIN_WAKE @property",
"ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def",
"def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime'",
"week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds)",
"self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep",
"inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY",
"machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep not available:",
"= pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime =",
"__initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value) def",
"sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) - machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime",
"pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)) try: machine.deepsleep(milliseconds) except Exception as",
"def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key, value)",
"milliseconds=0): if milliseconds == 0: milliseconds = 604800000 # 1",
"machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason == machine.PIN_WAKE @property def",
"milliseconds = 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime",
"INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart =",
"P3, P4, P6, P8 to P10 and P13 to P23",
"try: machine.deepsleep(milliseconds) except Exception as e: Exceptions.error(Exception('Deepsleep not available: '",
"== machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property",
"powerOnWake(self): return self.wakeReason == machine.PWRON_WAKE @property def pinWake(self): return self.wakeReason",
"def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake:",
"Exception as e: Exceptions.error(Exception('Sleep not available: ' + str(e))) def",
"# 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY, self.activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart))",
"[] def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) == None): pycom.nvs_set(key,",
"__init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime",
"to P10 and P13 to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin)",
"Exceptions.error(Exception('Deepsleep not available: ' + str(e))) def delay(self, milliseconds): utime.sleep_ms(milliseconds)",
"isSleepWake(self): return self.pinWake or self.RTCWake or self.ULPWake @property def activeTime(self):",
"@property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY =",
"@property def ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property def isSleepWake(self):",
"self.__wakeUpPins = [] def __initPersistentVariable(self, key, value=0): if (pycom.nvs_get(key) ==",
"and P13 to P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin)",
"@property def RTCWake(self): return self.wakeReason == machine.RTC_WAKE @property def ULPWake(self):",
"or self.ULPWake @property def activeTime(self): return self.__activeTime + utime.ticks_diff(utime.ticks_ms(), self.__activityStart)",
"0: milliseconds = 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds) pycom.nvs_set(Sleep.ACTIVE_TIME_KEY,",
"pycom.nvs_set(key, value) def addWakeUpPin(self, pin): # P2, P3, P4, P6,",
"def sleep(self, milliseconds=0): if milliseconds == 0: milliseconds = 604800000",
"== machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake or self.RTCWake or",
"'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not",
"P8 to P10 and P13 to P23 if isinstance(pin, list):",
"as e: Exceptions.error(Exception('Sleep not available: ' + str(e))) def resetTimers(self):",
"SLEEP_TIME_KEY = 'sleepTime' def __init__(self): self.__activityStart = utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY)",
"milliseconds == 0: milliseconds = 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY,",
"try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as e: Exceptions.error(Exception('Sleep not",
"Exception as e: Exceptions.error(Exception('Deepsleep not available: ' + str(e))) def",
"P23 if isinstance(pin, list): self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH,",
"- machine.remaining_sleep_time() pycom.nvs_set(Sleep.INACTIVE_TIME_KEY, pycom.nvs_get(Sleep.INACTIVE_TIME_KEY) + sleptTime) self.__activeTime = pycom.nvs_get(Sleep.ACTIVE_TIME_KEY) self.__inactiveTime",
"import utime from exceptions import Exceptions class Sleep: @property def",
"addWakeUpPin(self, pin): # P2, P3, P4, P6, P8 to P10",
"== 0: milliseconds = 604800000 # 1 week pycom.nvs_set(Sleep.SLEEP_TIME_KEY, milliseconds)",
"self.__activityStart) @property def inactiveTime(self): return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY",
"@property def isSleepWake(self): return self.pinWake or self.RTCWake or self.ULPWake @property",
"self.__wakeUpPins.extend(pin) else: self.__wakeUpPins.append(pin) try: machine.pin_sleep_wakeup(self.__wakeUpPins, mode=machine.WAKEUP_ANY_HIGH, enable_pull=True) except Exception as",
"e: Exceptions.error(Exception('Deepsleep not available: ' + str(e))) def delay(self, milliseconds):",
"machine.ULP_WAKE @property def isSleepWake(self): return self.pinWake or self.RTCWake or self.ULPWake",
"utime.ticks_ms() self.__initPersistentVariable(Sleep.ACTIVE_TIME_KEY) self.__initPersistentVariable(Sleep.INACTIVE_TIME_KEY) if not self.powerOnWake: sleptTime = pycom.nvs_get(Sleep.SLEEP_TIME_KEY) -",
"return self.__inactiveTime ACTIVE_TIME_KEY = 'activeTime' INACTIVE_TIME_KEY = 'inactiveTime' SLEEP_TIME_KEY =",
"import Exceptions class Sleep: @property def wakeReason(self): return machine.wake_reason()[0] @property",
"machine.RTC_WAKE @property def ULPWake(self): return self.wakeReason == machine.ULP_WAKE @property def",
"from exceptions import Exceptions class Sleep: @property def wakeReason(self): return"
] |
[
"or q_p_id not in question_scores[doc_pass_id]: p_count += 1 u_count +=",
"args.output_path question_scores = load_predictions(input_path) with open(output_path, 'w') as f: json.dump(question_scores,",
"return question_scores def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True)",
"if doc_pass_id not in question_scores or q_p_id not in question_scores[doc_pass_id]:",
"input_path = args.input_path output_path = args.output_path question_scores = load_predictions(input_path) with",
"in os.listdir(input_path): if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores",
"# score = prediction['pos_score'] score = prediction['pos_score'] - prediction['neg_score'] if",
"prediction['id'] q_p_id = prediction['question_id'] # score = prediction['pos_score'] score =",
"doc_pass_id not in question_scores or q_p_id not in question_scores[doc_pass_id]: p_count",
"= args.output_path question_scores = load_predictions(input_path) with open(output_path, 'w') as f:",
"+= 1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique predictions') print(f'{u_count} total",
"question_scores = load_predictions(input_path) with open(output_path, 'w') as f: json.dump(question_scores, f)",
"for file_name in os.listdir(input_path): if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name))",
"q_p_id not in question_scores[doc_pass_id]: p_count += 1 u_count += 1",
"= argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path', required=True) args =",
"prediction in pred_list: doc_pass_id = prediction['id'] q_p_id = prediction['question_id'] #",
"u_count += 1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique predictions') print(f'{u_count}",
"0 u_count = 0 for prediction in pred_list: doc_pass_id =",
"= torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict)) p_count =",
"open(output_path, 'w') as f: json.dump(question_scores, f) if __name__ == '__main__':",
"prediction['question_id'] # score = prediction['pos_score'] score = prediction['pos_score'] - prediction['neg_score']",
"import json def load_predictions(input_path): pred_list = [] for file_name in",
"import torch import argparse from collections import defaultdict import os",
"= prediction['pos_score'] score = prediction['pos_score'] - prediction['neg_score'] if doc_pass_id not",
"1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique predictions') print(f'{u_count} total predictions')",
"torch import argparse from collections import defaultdict import os import",
"predictions') return question_scores def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path',",
"import os import json def load_predictions(input_path): pred_list = [] for",
"os.listdir(input_path): if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores =",
"file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict))",
"total predictions') return question_scores def main(): parser = argparse.ArgumentParser() parser.add_argument('-i',",
"[] for file_name in os.listdir(input_path): if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path,",
"= parser.parse_args() input_path = args.input_path output_path = args.output_path question_scores =",
"defaultdict(dict)) p_count = 0 u_count = 0 for prediction in",
"q_p_id = prediction['question_id'] # score = prediction['pos_score'] score = prediction['pos_score']",
"<reponame>Supermaxman/pytorch-gleam<gh_stars>0 import torch import argparse from collections import defaultdict import",
"= prediction['question_id'] # score = prediction['pos_score'] score = prediction['pos_score'] -",
"def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path',",
"load_predictions(input_path): pred_list = [] for file_name in os.listdir(input_path): if file_name.endswith('.pt'):",
"prediction['pos_score'] - prediction['neg_score'] if doc_pass_id not in question_scores or q_p_id",
"'--output_path', required=True) args = parser.parse_args() input_path = args.input_path output_path =",
"os import json def load_predictions(input_path): pred_list = [] for file_name",
"pred_list = [] for file_name in os.listdir(input_path): if file_name.endswith('.pt'): preds",
"u_count = 0 for prediction in pred_list: doc_pass_id = prediction['id']",
"print(f'{u_count} total predictions') return question_scores def main(): parser = argparse.ArgumentParser()",
"unique predictions') print(f'{u_count} total predictions') return question_scores def main(): parser",
"defaultdict import os import json def load_predictions(input_path): pred_list = []",
"= [] for file_name in os.listdir(input_path): if file_name.endswith('.pt'): preds =",
"args = parser.parse_args() input_path = args.input_path output_path = args.output_path question_scores",
"in question_scores or q_p_id not in question_scores[doc_pass_id]: p_count += 1",
"in question_scores[doc_pass_id]: p_count += 1 u_count += 1 question_scores[doc_pass_id][q_p_id] =",
"parser.parse_args() input_path = args.input_path output_path = args.output_path question_scores = load_predictions(input_path)",
"score print(f'{p_count} unique predictions') print(f'{u_count} total predictions') return question_scores def",
"p_count = 0 u_count = 0 for prediction in pred_list:",
"from collections import defaultdict import os import json def load_predictions(input_path):",
"pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict)) p_count = 0 u_count =",
"json def load_predictions(input_path): pred_list = [] for file_name in os.listdir(input_path):",
"question_scores = defaultdict(lambda: defaultdict(dict)) p_count = 0 u_count = 0",
"question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique predictions') print(f'{u_count} total predictions') return",
"parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path', required=True) args",
"import defaultdict import os import json def load_predictions(input_path): pred_list =",
"0 for prediction in pred_list: doc_pass_id = prediction['id'] q_p_id =",
"with open(output_path, 'w') as f: json.dump(question_scores, f) if __name__ ==",
"for prediction in pred_list: doc_pass_id = prediction['id'] q_p_id = prediction['question_id']",
"score = prediction['pos_score'] - prediction['neg_score'] if doc_pass_id not in question_scores",
"file_name in os.listdir(input_path): if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds)",
"required=True) parser.add_argument('-o', '--output_path', required=True) args = parser.parse_args() input_path = args.input_path",
"main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path', required=True)",
"+= 1 u_count += 1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique",
"= load_predictions(input_path) with open(output_path, 'w') as f: json.dump(question_scores, f) if",
"1 u_count += 1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count} unique predictions')",
"collections import defaultdict import os import json def load_predictions(input_path): pred_list",
"= prediction['id'] q_p_id = prediction['question_id'] # score = prediction['pos_score'] score",
"= prediction['pos_score'] - prediction['neg_score'] if doc_pass_id not in question_scores or",
"= 0 for prediction in pred_list: doc_pass_id = prediction['id'] q_p_id",
"p_count += 1 u_count += 1 question_scores[doc_pass_id][q_p_id] = score print(f'{p_count}",
"parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path', required=True) args = parser.parse_args() input_path",
"preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict)) p_count",
"args.input_path output_path = args.output_path question_scores = load_predictions(input_path) with open(output_path, 'w')",
"argparse from collections import defaultdict import os import json def",
"output_path = args.output_path question_scores = load_predictions(input_path) with open(output_path, 'w') as",
"parser.add_argument('-o', '--output_path', required=True) args = parser.parse_args() input_path = args.input_path output_path",
"predictions') print(f'{u_count} total predictions') return question_scores def main(): parser =",
"= 0 u_count = 0 for prediction in pred_list: doc_pass_id",
"file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict)) p_count = 0 u_count",
"question_scores def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o',",
"'--input_path', required=True) parser.add_argument('-o', '--output_path', required=True) args = parser.parse_args() input_path =",
"question_scores or q_p_id not in question_scores[doc_pass_id]: p_count += 1 u_count",
"score = prediction['pos_score'] score = prediction['pos_score'] - prediction['neg_score'] if doc_pass_id",
"prediction['neg_score'] if doc_pass_id not in question_scores or q_p_id not in",
"load_predictions(input_path) with open(output_path, 'w') as f: json.dump(question_scores, f) if __name__",
"torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda: defaultdict(dict)) p_count = 0",
"pred_list: doc_pass_id = prediction['id'] q_p_id = prediction['question_id'] # score =",
"not in question_scores[doc_pass_id]: p_count += 1 u_count += 1 question_scores[doc_pass_id][q_p_id]",
"question_scores[doc_pass_id]: p_count += 1 u_count += 1 question_scores[doc_pass_id][q_p_id] = score",
"- prediction['neg_score'] if doc_pass_id not in question_scores or q_p_id not",
"'w') as f: json.dump(question_scores, f) if __name__ == '__main__': main()",
"defaultdict(lambda: defaultdict(dict)) p_count = 0 u_count = 0 for prediction",
"print(f'{p_count} unique predictions') print(f'{u_count} total predictions') return question_scores def main():",
"in pred_list: doc_pass_id = prediction['id'] q_p_id = prediction['question_id'] # score",
"not in question_scores or q_p_id not in question_scores[doc_pass_id]: p_count +=",
"def load_predictions(input_path): pred_list = [] for file_name in os.listdir(input_path): if",
"= args.input_path output_path = args.output_path question_scores = load_predictions(input_path) with open(output_path,",
"= defaultdict(lambda: defaultdict(dict)) p_count = 0 u_count = 0 for",
"doc_pass_id = prediction['id'] q_p_id = prediction['question_id'] # score = prediction['pos_score']",
"prediction['pos_score'] score = prediction['pos_score'] - prediction['neg_score'] if doc_pass_id not in",
"required=True) args = parser.parse_args() input_path = args.input_path output_path = args.output_path",
"if file_name.endswith('.pt'): preds = torch.load(os.path.join(input_path, file_name)) pred_list.extend(preds) question_scores = defaultdict(lambda:",
"argparse.ArgumentParser() parser.add_argument('-i', '--input_path', required=True) parser.add_argument('-o', '--output_path', required=True) args = parser.parse_args()",
"= score print(f'{p_count} unique predictions') print(f'{u_count} total predictions') return question_scores",
"import argparse from collections import defaultdict import os import json"
] |
[
"= int(input('Qual o valor da casa? ')) sal = int(input('Qual",
"? ')) parcela = casa/prazo margem = sal* (30/100) if",
"(30/100) if parcela > margem: print('Este negocio não foi aprovado,",
"a parcela é de R$ {} e voce pode pagar",
"sal* (30/100) if parcela > margem: print('Este negocio não foi",
"else: print(\"Negocio aprovado pois a parcela é de R$ {}",
"int(input('Qual seu salario? ')) prazo = int(input('Quantos meses deseja pagar",
"casa = int(input('Qual o valor da casa? ')) sal =",
"= int(input('Qual seu salario? ')) prazo = int(input('Quantos meses deseja",
"parcela é de R$ {} e voce pode pagar R$",
"de R$ {} e voce pode pagar R$ {} mensais\".format(parcela,margem))",
"pois a parcela é de R$ {} e voce pode",
"casa/prazo margem = sal* (30/100) if parcela > margem: print('Este",
"print('Este negocio não foi aprovado, aumente o prazo .') else:",
"parcela > margem: print('Este negocio não foi aprovado, aumente o",
"casa? ')) sal = int(input('Qual seu salario? ')) prazo =",
".') else: print(\"Negocio aprovado pois a parcela é de R$",
"margem = sal* (30/100) if parcela > margem: print('Este negocio",
"print(\"Negocio aprovado pois a parcela é de R$ {} e",
"int(input('Quantos meses deseja pagar ? ')) parcela = casa/prazo margem",
"não foi aprovado, aumente o prazo .') else: print(\"Negocio aprovado",
"')) prazo = int(input('Quantos meses deseja pagar ? ')) parcela",
"seu salario? ')) prazo = int(input('Quantos meses deseja pagar ?",
"')) parcela = casa/prazo margem = sal* (30/100) if parcela",
"deseja pagar ? ')) parcela = casa/prazo margem = sal*",
"parcela = casa/prazo margem = sal* (30/100) if parcela >",
"= int(input('Quantos meses deseja pagar ? ')) parcela = casa/prazo",
"salario? ')) prazo = int(input('Quantos meses deseja pagar ? '))",
"if parcela > margem: print('Este negocio não foi aprovado, aumente",
"aprovado pois a parcela é de R$ {} e voce",
"')) sal = int(input('Qual seu salario? ')) prazo = int(input('Quantos",
"o valor da casa? ')) sal = int(input('Qual seu salario?",
"meses deseja pagar ? ')) parcela = casa/prazo margem =",
"pagar ? ')) parcela = casa/prazo margem = sal* (30/100)",
"da casa? ')) sal = int(input('Qual seu salario? ')) prazo",
"int(input('Qual o valor da casa? ')) sal = int(input('Qual seu",
"= sal* (30/100) if parcela > margem: print('Este negocio não",
"> margem: print('Este negocio não foi aprovado, aumente o prazo",
"o prazo .') else: print(\"Negocio aprovado pois a parcela é",
"aprovado, aumente o prazo .') else: print(\"Negocio aprovado pois a",
"prazo .') else: print(\"Negocio aprovado pois a parcela é de",
"negocio não foi aprovado, aumente o prazo .') else: print(\"Negocio",
"é de R$ {} e voce pode pagar R$ {}",
"= casa/prazo margem = sal* (30/100) if parcela > margem:",
"aumente o prazo .') else: print(\"Negocio aprovado pois a parcela",
"sal = int(input('Qual seu salario? ')) prazo = int(input('Quantos meses",
"margem: print('Este negocio não foi aprovado, aumente o prazo .')",
"prazo = int(input('Quantos meses deseja pagar ? ')) parcela =",
"valor da casa? ')) sal = int(input('Qual seu salario? '))",
"foi aprovado, aumente o prazo .') else: print(\"Negocio aprovado pois"
] |
[
"upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps', 'ordering': ('company_name',), },",
"3.1.7 on 2021-03-27 18:22 from django.db import migrations, models class",
"import migrations, models class Migration(migrations.Migration): dependencies = [ ('HackBitApp', '0002_company_photo'),",
"18:22 from django.db import migrations, models class Migration(migrations.Migration): dependencies =",
"serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True,",
"fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)),",
"class Migration(migrations.Migration): dependencies = [ ('HackBitApp', '0002_company_photo'), ] operations =",
"name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200,",
"models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap', 'verbose_name_plural':",
"models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps', 'ordering': ('company_name',),",
"('HackBitApp', '0002_company_photo'), ] operations = [ migrations.CreateModel( name='Roadmap', fields=[ ('id',",
"operations = [ migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,",
"'0002_company_photo'), ] operations = [ migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True,",
"] operations = [ migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,",
"upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps',",
"Django 3.1.7 on 2021-03-27 18:22 from django.db import migrations, models",
"options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps', 'ordering': ('company_name',), }, ), ]",
"models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')),",
"models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name':",
"2021-03-27 18:22 from django.db import migrations, models class Migration(migrations.Migration): dependencies",
"verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')),",
"[ migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name',",
"# Generated by Django 3.1.7 on 2021-03-27 18:22 from django.db",
"models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True,",
"= [ migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),",
"Migration(migrations.Migration): dependencies = [ ('HackBitApp', '0002_company_photo'), ] operations = [",
"], options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps', 'ordering': ('company_name',), }, ),",
"from django.db import migrations, models class Migration(migrations.Migration): dependencies = [",
"Generated by Django 3.1.7 on 2021-03-27 18:22 from django.db import",
"models class Migration(migrations.Migration): dependencies = [ ('HackBitApp', '0002_company_photo'), ] operations",
"by Django 3.1.7 on 2021-03-27 18:22 from django.db import migrations,",
"max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')),",
"unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ],",
"= [ ('HackBitApp', '0002_company_photo'), ] operations = [ migrations.CreateModel( name='Roadmap',",
"migrations, models class Migration(migrations.Migration): dependencies = [ ('HackBitApp', '0002_company_photo'), ]",
"[ ('HackBitApp', '0002_company_photo'), ] operations = [ migrations.CreateModel( name='Roadmap', fields=[",
"primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2',",
"('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1',",
"django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('HackBitApp',",
"on 2021-03-27 18:22 from django.db import migrations, models class Migration(migrations.Migration):",
"('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap',",
"('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={ 'verbose_name': 'roadmap', 'verbose_name_plural': 'roadmaps', 'ordering':",
"dependencies = [ ('HackBitApp', '0002_company_photo'), ] operations = [ migrations.CreateModel(",
"migrations.CreateModel( name='Roadmap', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('company_name', models.CharField(db_index=True,",
"('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ], options={",
"('company_name', models.CharField(db_index=True, max_length=200, unique=True)), ('photo1', models.ImageField(upload_to='photos/company/roadmap')), ('photo2', models.ImageField(blank=True, upload_to='photos/company/roadmap')), ('photo3',"
] |
[
"np #%% Kernel operations # Returns the norm of the",
"* inner_matrix + norm_square_1 + np.transpose(norm_square_2) return norm_diff # Returns",
"\"\"\" import autograd.numpy as np #%% Kernel operations # Returns",
"norm_diff # Returns the pairwise inner product def inner_matrix(matrix_1, matrix_2):",
"# Returns the norm of the pairwise difference def norm_matrix(matrix_1,",
"= -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2) return norm_diff",
"+ np.transpose(norm_square_2) return norm_diff # Returns the pairwise inner product",
"matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis = 1) norm_square_1 = np.reshape(norm_square_1,",
"# Returns the pairwise inner product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape",
"d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2 *",
"return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ == '__main__': print('This is the",
"= 1) norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis",
"the pairwise difference def norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis",
"d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff =",
"Returns the norm of the pairwise difference def norm_matrix(matrix_1, matrix_2):",
"norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix",
"def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1,",
"np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1,",
"+ norm_square_1 + np.transpose(norm_square_2) return norm_diff # Returns the pairwise",
"Wed Jul 22 14:36:48 2020 @author: matth \"\"\" import autograd.numpy",
"1) norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1)",
"= np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2 * inner_matrix + norm_square_1",
"matth \"\"\" import autograd.numpy as np #%% Kernel operations #",
"if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ == '__main__':",
"def norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis = 1) norm_square_1",
"np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ == '__main__': print('This is the matrix",
"np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis = 1) norm_square_2 =",
"d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff",
"Created on Wed Jul 22 14:36:48 2020 @author: matth \"\"\"",
"np.transpose(norm_square_2) return norm_diff # Returns the pairwise inner product def",
"\"\"\" Created on Wed Jul 22 14:36:48 2020 @author: matth",
"if __name__ == '__main__': print('This is the matrix operations file')",
"np.transpose(matrix_2)) if __name__ == '__main__': print('This is the matrix operations",
"= np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix =",
"if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2",
"as np #%% Kernel operations # Returns the norm of",
"pairwise difference def norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis =",
"-*- \"\"\" Created on Wed Jul 22 14:36:48 2020 @author:",
"inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2))",
"inner_matrix + norm_square_1 + np.transpose(norm_square_2) return norm_diff # Returns the",
"(-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis = 1) norm_square_2 = np.reshape(norm_square_2,",
"np.sum(np.square(matrix_2), axis = 1) norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape",
"Returns the pairwise inner product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape",
"axis = 1) norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2),",
"-*- coding: utf-8 -*- \"\"\" Created on Wed Jul 22",
"norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis = 1) norm_square_1 =",
"operations # Returns the norm of the pairwise difference def",
"autograd.numpy as np #%% Kernel operations # Returns the norm",
"on Wed Jul 22 14:36:48 2020 @author: matth \"\"\" import",
"the norm of the pairwise difference def norm_matrix(matrix_1, matrix_2): norm_square_1",
"@author: matth \"\"\" import autograd.numpy as np #%% Kernel operations",
"-2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2) return norm_diff #",
"<reponame>Romit-Maulik/Tutorials-Demos-Practice<filename>Other_Python/Kernel_Methods/matrix_operations.py # -*- coding: utf-8 -*- \"\"\" Created on Wed",
"# -*- coding: utf-8 -*- \"\"\" Created on Wed Jul",
"matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2 * inner_matrix",
"inner product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1)",
"d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ ==",
"Jul 22 14:36:48 2020 @author: matth \"\"\" import autograd.numpy as",
"pairwise inner product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]:",
"norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2) return",
"axis = 1) norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if",
"Kernel operations # Returns the norm of the pairwise difference",
"= np.sum(np.square(matrix_2), axis = 1) norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape",
"coding: utf-8 -*- \"\"\" Created on Wed Jul 22 14:36:48",
"utf-8 -*- \"\"\" Created on Wed Jul 22 14:36:48 2020",
"np.sum(np.square(matrix_1), axis = 1) norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2 =",
"= 1) norm_square_2 = np.reshape(norm_square_2, (-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]:",
"2020 @author: matth \"\"\" import autograd.numpy as np #%% Kernel",
"14:36:48 2020 @author: matth \"\"\" import autograd.numpy as np #%%",
"the pairwise inner product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if",
"d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ == '__main__': print('This",
"norm_square_1 + np.transpose(norm_square_2) return norm_diff # Returns the pairwise inner",
"#%% Kernel operations # Returns the norm of the pairwise",
"1) norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis =",
"22 14:36:48 2020 @author: matth \"\"\" import autograd.numpy as np",
"norm_square_1 = np.sum(np.square(matrix_1), axis = 1) norm_square_1 = np.reshape(norm_square_1, (-1,1))",
"import autograd.numpy as np #%% Kernel operations # Returns the",
"np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2 * inner_matrix + norm_square_1 +",
"= np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis = 1) norm_square_2",
"of the pairwise difference def norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1),",
"inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2)) norm_diff = -2 * inner_matrix +",
"norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2 = np.sum(np.square(matrix_2), axis = 1)",
"difference def norm_matrix(matrix_1, matrix_2): norm_square_1 = np.sum(np.square(matrix_1), axis = 1)",
"= np.sum(np.square(matrix_1), axis = 1) norm_square_1 = np.reshape(norm_square_1, (-1,1)) norm_square_2",
"(-1,1)) d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) inner_matrix = np.matmul(matrix_1, np.transpose(matrix_2))",
"np.transpose(matrix_2)) norm_diff = -2 * inner_matrix + norm_square_1 + np.transpose(norm_square_2)",
"product def inner_matrix(matrix_1, matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return",
"return norm_diff # Returns the pairwise inner product def inner_matrix(matrix_1,",
"norm of the pairwise difference def norm_matrix(matrix_1, matrix_2): norm_square_1 =",
"matrix_2): d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if",
"matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__ == '__main__': print('This is",
"norm_square_2 = np.sum(np.square(matrix_2), axis = 1) norm_square_2 = np.reshape(norm_square_2, (-1,1))",
"d1=matrix_1.shape d2=matrix_2.shape if d1[1]!=d2[1]: matrix_1=np.transpose(matrix_1) return np.matmul(matrix_1, np.transpose(matrix_2)) if __name__"
] |
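A minimal usage sketch for the two kernel helpers reconstructed above; the module name `kernels` and the sample arrays are assumptions added for illustration, not part of the source row.

# Illustrative sketch only. Assumes the file above was saved as kernels.py.
import autograd.numpy as np
from kernels import norm_matrix, inner_matrix  # hypothetical module name

X = np.array([[0.0, 1.0],
              [2.0, 3.0]])           # 2 points in R^2 (made-up data)
Y = np.array([[1.0, 1.0],
              [0.0, 0.0],
              [3.0, 4.0]])           # 3 points in R^2 (made-up data)

G = inner_matrix(X, Y)   # (2, 3) matrix of pairwise inner products <x_i, y_j>
D = norm_matrix(X, Y)    # (2, 3) matrix of pairwise squared distances:
                         # ||x_i||^2 + ||y_j||^2 - 2*<x_i, y_j>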
[
"request.GET: if code is None: code = 302 if code",
"in request.GET: if code is None: code = 302 if",
"we're in a preflight and it's asked if b'preflight' in",
"#Preflight if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in",
"in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials'))",
"request.GET.first(b'location')) headers = {} for name, values in request.headers.items(): if",
"u\"1\" else: body = u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body if",
"is None: code = 302 if code >= 300 and",
"= values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if",
"#Log that the preflight actually happened if we have an",
"request.GET: token = request.GET.first(b\"token\") value = request.server.stash.take(token) if value is",
"request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET: if code is",
"= None if request.method == u'OPTIONS': #Override the response code",
"headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have no idea, really headers[name]",
"else: #I have no idea, really headers[name] = values headers[u'get_value']",
"body if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in",
"or b'none') if b\"check\" in request.GET: token = request.GET.first(b\"token\") value",
"request.method == u'OPTIONS': #Override the response code if we're in",
"300 and code < 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {}",
"else: body = u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body if origin",
"#Override the response code if we're in a preflight and",
"that the preflight actually happened if we have an ident",
"== u'OPTIONS': #Override the response code if we're in a",
"code if we're in a preflight and it's asked if",
"302 if code >= 300 and code < 400: response.headers.set(b\"Location\",",
"b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET:",
"actually happened if we have an ident if b'token' in",
"code = int(code_raw) else: code = None if request.method ==",
"request.GET: code = int(request.GET.first(b'preflight')) #Log that the preflight actually happened",
"= json.dumps(headers) if code: return (code, b\"StatusText\"), [], body else:",
"request.GET.first(b'code', None) if code_raw: code = int(code_raw) else: code =",
"json from wptserve.utils import isomorphic_decode def main(request, response): origin =",
"else: code = None if request.method == u'OPTIONS': #Override the",
"!= b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2'))",
"body = u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body if origin !=",
"b'none') if b\"check\" in request.GET: token = request.GET.first(b\"token\") value =",
"= request.GET.first(b\"token\") value = request.server.stash.take(token) if value is not None:",
"u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\",",
"in request.GET: code = int(request.GET.first(b'preflight')) #Log that the preflight actually",
"for name, values in request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)]",
"if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET:",
"response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if",
"import isomorphic_decode def main(request, response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or",
"isomorphic_decode def main(request, response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none')",
"code_raw: code = int(code_raw) else: code = None if request.method",
"request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in",
"= u\"1\" else: body = u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body",
"< 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {} for name, values",
"body = json.dumps(headers) if code: return (code, b\"StatusText\"), [], body",
">= 300 and code < 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers =",
"if we're in a preflight and it's asked if b'preflight'",
"= request.GET.first(b'code', None) if code_raw: code = int(code_raw) else: code",
"== b\"keep\": request.server.stash.put(token, value) body = u\"1\" else: body =",
"wptserve.utils import isomorphic_decode def main(request, response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin')",
"value) body = u\"1\" else: body = u\"0\" return [(b\"Content-Type\",",
"the preflight actually happened if we have an ident if",
"if value is not None: if request.GET.first(b\"check\", None) == b\"keep\":",
"in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET: if code",
"if code >= 300 and code < 400: response.headers.set(b\"Location\", request.GET.first(b'location'))",
"we have an ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True)",
"= 302 if code >= 300 and code < 400:",
"None) if code_raw: code = int(code_raw) else: code = None",
"if code: return (code, b\"StatusText\"), [], body else: return body",
"not None: if request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token, value) body",
"from wptserve.utils import isomorphic_decode def main(request, response): origin = request.GET.first(b\"origin\",",
"value is not None: if request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token,",
"response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if",
"asked if b'preflight' in request.GET: code = int(request.GET.first(b'preflight')) #Log that",
"b'preflight' in request.GET: code = int(request.GET.first(b'preflight')) #Log that the preflight",
"is not None: if request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token, value)",
"b'location' in request.GET: if code is None: code = 302",
"preflight actually happened if we have an ident if b'token'",
"preflight and it's asked if b'preflight' in request.GET: code =",
"if b'preflight' in request.GET: code = int(request.GET.first(b'preflight')) #Log that the",
"1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have no idea, really",
"= request.server.stash.take(token) if value is not None: if request.GET.first(b\"check\", None)",
"body = u\"1\" else: body = u\"0\" return [(b\"Content-Type\", b\"text/plain\")],",
"request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if b\"check\" in request.GET: token =",
"ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in",
"idea, really headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body",
"values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if code:",
"code_raw = request.GET.first(b'code', None) if code_raw: code = int(code_raw) else:",
"request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw",
"= {} for name, values in request.headers.items(): if len(values) ==",
"int(code_raw) else: code = None if request.method == u'OPTIONS': #Override",
"request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if code_raw: code",
"b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\",",
"== 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have no idea,",
"= u\"0\" return [(b\"Content-Type\", b\"text/plain\")], body if origin != b'none':",
"#I have no idea, really headers[name] = values headers[u'get_value'] =",
"request.GET.first(b\"token\") value = request.server.stash.take(token) if value is not None: if",
"request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code',",
"if request.method == u'OPTIONS': #Override the response code if we're",
"in request.GET: token = request.GET.first(b\"token\") value = request.server.stash.take(token) if value",
"request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I",
"in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\",",
"if b'location' in request.GET: if code is None: code =",
"{} for name, values in request.headers.items(): if len(values) == 1:",
"b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if",
"isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if code: return (code, b\"StatusText\"),",
"b\"keep\": request.server.stash.put(token, value) body = u\"1\" else: body = u\"0\"",
"code = None if request.method == u'OPTIONS': #Override the response",
"if code is None: code = 302 if code >=",
"request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers'))",
"response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if code_raw: code =",
"code = 302 if code >= 300 and code <",
"values in request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0])",
"len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have no",
"headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers)",
"request.GET.first(b'origin2')) #Preflight if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials'",
"json.dumps(headers) if code: return (code, b\"StatusText\"), [], body else: return",
"in request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else:",
"import json from wptserve.utils import isomorphic_decode def main(request, response): origin",
"code = int(request.GET.first(b'preflight')) #Log that the preflight actually happened if",
"name, values in request.headers.items(): if len(values) == 1: headers[isomorphic_decode(name)] =",
"if len(values) == 1: headers[isomorphic_decode(name)] = isomorphic_decode(values[0]) else: #I have",
"request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token, value) body = u\"1\" else:",
"token = request.GET.first(b\"token\") value = request.server.stash.take(token) if value is not",
"happened if we have an ident if b'token' in request.GET:",
"= isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if code: return (code,",
"in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if code_raw:",
"request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if",
"if code_raw: code = int(code_raw) else: code = None if",
"if we have an ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'],",
"None) == b\"keep\": request.server.stash.put(token, value) body = u\"1\" else: body",
"if request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token, value) body = u\"1\"",
"a preflight and it's asked if b'preflight' in request.GET: code",
"return [(b\"Content-Type\", b\"text/plain\")], body if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin)",
"response code if we're in a preflight and it's asked",
"b'')) body = json.dumps(headers) if code: return (code, b\"StatusText\"), [],",
"400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {} for name, values in",
"an ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location'",
"None: if request.GET.first(b\"check\", None) == b\"keep\": request.server.stash.put(token, value) body =",
"= isomorphic_decode(values[0]) else: #I have no idea, really headers[name] =",
"True) if b'location' in request.GET: if code is None: code",
"if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in request.GET:",
"request.headers.get(b'origin') or b'none') if b\"check\" in request.GET: token = request.GET.first(b\"token\")",
"[(b\"Content-Type\", b\"text/plain\")], body if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if",
"response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if b\"check\" in",
"int(request.GET.first(b'preflight')) #Log that the preflight actually happened if we have",
"code < 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {} for name,",
"None if request.method == u'OPTIONS': #Override the response code if",
"request.server.stash.take(token) if value is not None: if request.GET.first(b\"check\", None) ==",
"origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\",",
"response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {} for name, values in request.headers.items():",
"if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers' in",
"b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight",
"None: code = 302 if code >= 300 and code",
"and it's asked if b'preflight' in request.GET: code = int(request.GET.first(b'preflight'))",
"code >= 300 and code < 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers",
"in a preflight and it's asked if b'preflight' in request.GET:",
"the response code if we're in a preflight and it's",
"have an ident if b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if",
"headers = {} for name, values in request.headers.items(): if len(values)",
"if b\"check\" in request.GET: token = request.GET.first(b\"token\") value = request.server.stash.take(token)",
"response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw =",
"b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\",",
"= int(code_raw) else: code = None if request.method == u'OPTIONS':",
"have no idea, really headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value',",
"if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None)",
"u'OPTIONS': #Override the response code if we're in a preflight",
"isomorphic_decode(values[0]) else: #I have no idea, really headers[name] = values",
"and code < 400: response.headers.set(b\"Location\", request.GET.first(b'location')) headers = {} for",
"no idea, really headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b''))",
"request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET: if code is None:",
"main(request, response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if b\"check\"",
"request.GET.first(b'methods')) code_raw = request.GET.first(b'code', None) if code_raw: code = int(code_raw)",
"it's asked if b'preflight' in request.GET: code = int(request.GET.first(b'preflight')) #Log",
"origin) if b'origin2' in request.GET: response.headers.append(b\"Access-Control-Allow-Origin\", request.GET.first(b'origin2')) #Preflight if b'headers'",
"b'token' in request.GET: request.server.stash.put(request.GET[b'token'], True) if b'location' in request.GET: if",
"= request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if b\"check\" in request.GET: token",
"b\"check\" in request.GET: token = request.GET.first(b\"token\") value = request.server.stash.take(token) if",
"in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods' in request.GET: response.headers.set(b\"Access-Control-Allow-Methods\", request.GET.first(b'methods'))",
"= int(request.GET.first(b'preflight')) #Log that the preflight actually happened if we",
"request.server.stash.put(token, value) body = u\"1\" else: body = u\"0\" return",
"headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body = json.dumps(headers) if code: return",
"value = request.server.stash.take(token) if value is not None: if request.GET.first(b\"check\",",
"if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2' in request.GET:",
"response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in request.GET: response.headers.set(b\"Access-Control-Allow-Credentials\", request.GET.first(b'credentials')) if b'methods'",
"def main(request, response): origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if",
"b\"text/plain\")], body if origin != b'none': response.headers.set(b\"Access-Control-Allow-Origin\", origin) if b'origin2'",
"if b'headers' in request.GET: response.headers.set(b\"Access-Control-Allow-Headers\", request.GET.first(b'headers')) if b'credentials' in request.GET:",
"code is None: code = 302 if code >= 300",
"origin = request.GET.first(b\"origin\", request.headers.get(b'origin') or b'none') if b\"check\" in request.GET:",
"really headers[name] = values headers[u'get_value'] = isomorphic_decode(request.GET.first(b'get_value', b'')) body ="
] |
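A hedged sketch of the query string a caller might use against the handler above; the request path is a hypothetical example, while the parameter names mirror the handler's `request.GET` reads.

# Illustrative sketch only: each key below corresponds to a branch in the
# handler above; the path is hypothetical.
from urllib.parse import urlencode

params = {
    "origin": "https://example.com",  # reflected as Access-Control-Allow-Origin
    "headers": "x-test",              # sets Access-Control-Allow-Headers
    "credentials": "true",            # sets Access-Control-Allow-Credentials
    "methods": "GET, PUT",            # sets Access-Control-Allow-Methods
    "code": "202",                    # overrides the response status code
    "get_value": "sample",            # echoed under 'get_value' in the JSON body
}
url = "/resources/cors-makeheader.py?" + urlencode(params)  # hypothetical path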
[
".device import DeviceObj # OSC Grid Object class OSCGrid(DeviceObj): def",
"__init__(self, name, width, height, ip, port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\",",
"port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width, height) self.buffer = []",
"ip, port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width, height) self.buffer =",
"g, b, x, y) # Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x,",
"= [] self.brightness = bri self.osc = udp_client.SimpleUDPClient(ip, port) def",
"\"osc_grid\", width, height) self.buffer = [] self.brightness = bri self.osc",
"b, x, y) # Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y))",
"DeviceObj.show(self) # Update Display bundle = osc_bundle_builder.OscBundleBuilder(0) for m in",
"DeviceObj.set(self, r, g, b, x, y) # Set Pixel builder",
"width, height) self.buffer = [] self.brightness = bri self.osc =",
"import osc_bundle_builder from pythonosc import osc_message_builder from pythonosc import udp_client",
"self.buffer = [] self.brightness = bri self.osc = udp_client.SimpleUDPClient(ip, port)",
"class OSCGrid(DeviceObj): def __init__(self, name, width, height, ip, port, bri=1):",
"self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) # Update Display bundle = osc_bundle_builder.OscBundleBuilder(0)",
"def show(self): DeviceObj.show(self) # Update Display bundle = osc_bundle_builder.OscBundleBuilder(0) for",
"g, b, x=0, y=0): DeviceObj.set(self, r, g, b, x, y)",
"from pythonosc import osc_bundle_builder from pythonosc import osc_message_builder from pythonosc",
"Object class OSCGrid(DeviceObj): def __init__(self, name, width, height, ip, port,",
"= bri self.osc = udp_client.SimpleUDPClient(ip, port) def set(self, r, g,",
"# Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b)",
"self.osc = udp_client.SimpleUDPClient(ip, port) def set(self, r, g, b, x=0,",
"[] self.brightness = bri self.osc = udp_client.SimpleUDPClient(ip, port) def set(self,",
"show(self): DeviceObj.show(self) # Update Display bundle = osc_bundle_builder.OscBundleBuilder(0) for m",
"pythonosc import osc_bundle_builder from pythonosc import osc_message_builder from pythonosc import",
"bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width, height) self.buffer = [] self.brightness",
"udp_client from .device import DeviceObj # OSC Grid Object class",
"builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) # Update Display bundle",
"# OSC Grid Object class OSCGrid(DeviceObj): def __init__(self, name, width,",
"# Update Display bundle = osc_bundle_builder.OscBundleBuilder(0) for m in self.buffer:",
"udp_client.SimpleUDPClient(ip, port) def set(self, r, g, b, x=0, y=0): DeviceObj.set(self,",
"r, g, b, x=0, y=0): DeviceObj.set(self, r, g, b, x,",
"def __init__(self, name, width, height, ip, port, bri=1): DeviceObj.__init__(self, name,",
"Display bundle = osc_bundle_builder.OscBundleBuilder(0) for m in self.buffer: bundle.add_content(m) self.osc.send(bundle.build())",
"b, x=0, y=0): DeviceObj.set(self, r, g, b, x, y) #",
"<reponame>wlfyit/PiLightsLib #!/usr/bin/env python3 from pythonosc import osc_bundle_builder from pythonosc import",
"Grid Object class OSCGrid(DeviceObj): def __init__(self, name, width, height, ip,",
"import osc_message_builder from pythonosc import udp_client from .device import DeviceObj",
"OSC Grid Object class OSCGrid(DeviceObj): def __init__(self, name, width, height,",
"OSCGrid(DeviceObj): def __init__(self, name, width, height, ip, port, bri=1): DeviceObj.__init__(self,",
"bundle = osc_bundle_builder.OscBundleBuilder(0) for m in self.buffer: bundle.add_content(m) self.osc.send(bundle.build()) self.buffer.clear()",
"import DeviceObj # OSC Grid Object class OSCGrid(DeviceObj): def __init__(self,",
"builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) # Update Display bundle =",
"from pythonosc import osc_message_builder from pythonosc import udp_client from .device",
"DeviceObj # OSC Grid Object class OSCGrid(DeviceObj): def __init__(self, name,",
"builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) # Update Display",
"name, width, height, ip, port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width,",
"height, ip, port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width, height) self.buffer",
"from .device import DeviceObj # OSC Grid Object class OSCGrid(DeviceObj):",
"osc_message_builder from pythonosc import udp_client from .device import DeviceObj #",
"y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) # Update",
"x, y) # Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r)",
"= osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self)",
"port) def set(self, r, g, b, x=0, y=0): DeviceObj.set(self, r,",
"DeviceObj.__init__(self, name, \"osc_grid\", width, height) self.buffer = [] self.brightness =",
"from pythonosc import udp_client from .device import DeviceObj # OSC",
"pythonosc import osc_message_builder from pythonosc import udp_client from .device import",
"Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build())",
"python3 from pythonosc import osc_bundle_builder from pythonosc import osc_message_builder from",
"x=0, y=0): DeviceObj.set(self, r, g, b, x, y) # Set",
"Update Display bundle = osc_bundle_builder.OscBundleBuilder(0) for m in self.buffer: bundle.add_content(m)",
"#!/usr/bin/env python3 from pythonosc import osc_bundle_builder from pythonosc import osc_message_builder",
"= udp_client.SimpleUDPClient(ip, port) def set(self, r, g, b, x=0, y=0):",
"height) self.buffer = [] self.brightness = bri self.osc = udp_client.SimpleUDPClient(ip,",
"pythonosc import udp_client from .device import DeviceObj # OSC Grid",
"Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def",
"self.brightness = bri self.osc = udp_client.SimpleUDPClient(ip, port) def set(self, r,",
"def set(self, r, g, b, x=0, y=0): DeviceObj.set(self, r, g,",
"y) # Set Pixel builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g)",
"builder = osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self):",
"import udp_client from .device import DeviceObj # OSC Grid Object",
"bri self.osc = udp_client.SimpleUDPClient(ip, port) def set(self, r, g, b,",
"r, g, b, x, y) # Set Pixel builder =",
"y=0): DeviceObj.set(self, r, g, b, x, y) # Set Pixel",
"name, \"osc_grid\", width, height) self.buffer = [] self.brightness = bri",
"osc_message_builder.OscMessageBuilder(address=\"/light/{0}/{1}/color\".format(x, y)) builder.add_arg(r) builder.add_arg(g) builder.add_arg(b) self.buffer.append(builder.build()) def show(self): DeviceObj.show(self) #",
"osc_bundle_builder from pythonosc import osc_message_builder from pythonosc import udp_client from",
"set(self, r, g, b, x=0, y=0): DeviceObj.set(self, r, g, b,",
"width, height, ip, port, bri=1): DeviceObj.__init__(self, name, \"osc_grid\", width, height)"
] |
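A brief usage sketch for the OSCGrid device above; the device name, grid size, IP and port are placeholder assumptions, while the set()/show() calls follow the class as reconstructed.

# Illustrative sketch only; address and grid size are made up.
grid = OSCGrid("demo-grid", 8, 8, "192.168.1.50", 9000, bri=1)

grid.set(255, 0, 0, x=0, y=0)   # queue a red pixel at (0, 0)
grid.set(0, 0, 255, x=7, y=7)   # queue a blue pixel at (7, 7)
grid.show()                     # flush the buffer as a single OSC bundle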
[
"class Project(db.Model): __tablename__ = 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name =",
"'project_id': self.project_id, 'key': self.key, 'value': self.value } return mydict def",
"def to_dict(self): mydict = { 'id': self.id, 'project_id': self.project_id, 'key':",
"'id': self.id, 'project_name': self.project_name } return mydict def __repr__(self): return",
"} return mydict def __repr__(self): return '<Item %r>' % self.__name__",
"self.project_name } return mydict def __repr__(self): return '<Project %r>' %",
"from .app import db class Project(db.Model): __tablename__ = 'projects' id",
"def __repr__(self): return '<Project %r>' % self.__name__ class Item(db.Model): __tablename__",
"= db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def to_dict(self): mydict = {",
"{ 'id': self.id, 'project_name': self.project_name } return mydict def __repr__(self):",
"= { 'id': self.id, 'project_name': self.project_name } return mydict def",
"id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key = db.Column(db.String(64),nullable=False)",
"self.project_id, 'key': self.key, 'value': self.value } return mydict def __repr__(self):",
"class Item(db.Model): __tablename__ = 'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id",
"mydict def __repr__(self): return '<Project %r>' % self.__name__ class Item(db.Model):",
"db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def to_dict(self): mydict",
"id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict =",
"return '<Project %r>' % self.__name__ class Item(db.Model): __tablename__ = 'Items'",
"mydict = { 'id': self.id, 'project_name': self.project_name } return mydict",
"} return mydict def __repr__(self): return '<Project %r>' % self.__name__",
"= 'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key",
".app import db class Project(db.Model): __tablename__ = 'projects' id =",
"mydict = { 'id': self.id, 'project_id': self.project_id, 'key': self.key, 'value':",
"{ 'id': self.id, 'project_id': self.project_id, 'key': self.key, 'value': self.value }",
"import db class Project(db.Model): __tablename__ = 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True)",
"= db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict = { 'id': self.id, 'project_name':",
"= { 'id': self.id, 'project_id': self.project_id, 'key': self.key, 'value': self.value",
"%r>' % self.__name__ class Item(db.Model): __tablename__ = 'Items' id =",
"= 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self):",
"key = db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def to_dict(self): mydict =",
"self.id, 'project_name': self.project_name } return mydict def __repr__(self): return '<Project",
"= db.Column(db.String(64),nullable=False) def to_dict(self): mydict = { 'id': self.id, 'project_id':",
"= db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def to_dict(self):",
"self.value } return mydict def __repr__(self): return '<Item %r>' %",
"to_dict(self): mydict = { 'id': self.id, 'project_id': self.project_id, 'key': self.key,",
"self.__name__ class Item(db.Model): __tablename__ = 'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True)",
"db class Project(db.Model): __tablename__ = 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name",
"db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict = { 'id': self.id, 'project_name': self.project_name",
"= db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict = {",
"def to_dict(self): mydict = { 'id': self.id, 'project_name': self.project_name }",
"Item(db.Model): __tablename__ = 'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id =",
"'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key =",
"primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False)",
"% self.__name__ class Item(db.Model): __tablename__ = 'Items' id = db.Column(db.Integer,",
"'id': self.id, 'project_id': self.project_id, 'key': self.key, 'value': self.value } return",
"self.key, 'value': self.value } return mydict def __repr__(self): return '<Item",
"'<Project %r>' % self.__name__ class Item(db.Model): __tablename__ = 'Items' id",
"__tablename__ = 'Items' id = db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer)",
"value = db.Column(db.String(64),nullable=False) def to_dict(self): mydict = { 'id': self.id,",
"__tablename__ = 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def",
"= db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value",
"to_dict(self): mydict = { 'id': self.id, 'project_name': self.project_name } return",
"db.Column(db.String(64),nullable=False) def to_dict(self): mydict = { 'id': self.id, 'project_id': self.project_id,",
"'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict",
"project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict = { 'id': self.id,",
"self.id, 'project_id': self.project_id, 'key': self.key, 'value': self.value } return mydict",
"'project_name': self.project_name } return mydict def __repr__(self): return '<Project %r>'",
"db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def to_dict(self): mydict = { 'id':",
"__repr__(self): return '<Project %r>' % self.__name__ class Item(db.Model): __tablename__ =",
"return mydict def __repr__(self): return '<Project %r>' % self.__name__ class",
"project_id = db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value = db.Column(db.String(64),nullable=False) def",
"db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True) def to_dict(self): mydict = { 'id':",
"'value': self.value } return mydict def __repr__(self): return '<Item %r>'",
"Project(db.Model): __tablename__ = 'projects' id = db.Column(db.Integer,primary_key=True,autoincrement=True) project_name = db.Column(db.String(64),unique=True,index=True)",
"db.Column(db.Integer, primary_key=True,autoincrement=True) project_id = db.Column(db.Integer) key = db.Column(db.String(64),nullable=False) value =",
"'key': self.key, 'value': self.value } return mydict def __repr__(self): return"
] |
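A short sketch of how the two models above might be used; the application context and session handling are assumed, since only the model definitions appear in the source row.

# Illustrative sketch only; assumes an app context and a configured db session.
project = Project(project_name="demo")
db.session.add(project)
db.session.commit()

item = Item(project_id=project.id, key="color", value="blue")
db.session.add(item)
db.session.commit()

project.to_dict()  # e.g. {'id': 1, 'project_name': 'demo'}
item.to_dict()     # e.g. {'id': 1, 'project_id': 1, 'key': 'color', 'value': 'blue'}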
[
"self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1)",
"setUp(self): self.cache = IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\") item =",
"\"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value,",
"99) self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1) if __name__",
"from iron_cache import * import unittest import requests class TestIronCache(unittest.TestCase):",
"IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\")",
"self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42)",
"item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\")",
"self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\",",
"item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will be",
"self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item =",
"def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3)",
"= self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value,",
"self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\")",
"self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item =",
"self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98)",
"def test_get(self): self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def",
"2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item",
"import requests class TestIronCache(unittest.TestCase): def setUp(self): self.cache = IronCache(\"test_cache\") def",
"\"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will",
"import unittest import requests class TestIronCache(unittest.TestCase): def setUp(self): self.cache =",
"self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\")",
"self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self):",
"requests class TestIronCache(unittest.TestCase): def setUp(self): self.cache = IronCache(\"test_cache\") def test_get(self):",
"iron_cache import * import unittest import requests class TestIronCache(unittest.TestCase): def",
"self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item",
"self.cache = IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\")",
"= self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will be deleted\")",
"def test_delete(self): self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\")",
"test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\",",
"test_delete(self): self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def",
"self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45)",
"class TestIronCache(unittest.TestCase): def setUp(self): self.cache = IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\",",
"import * import unittest import requests class TestIronCache(unittest.TestCase): def setUp(self):",
"\"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get,",
"test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 3) self.cache.increment(\"test_item\",",
"amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\", 100)",
"= self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value,",
"TestIronCache(unittest.TestCase): def setUp(self): self.cache = IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\")",
"self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item =",
"deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2) self.cache.increment(\"test_item\")",
"<reponame>iron-io/iron_cache_python<gh_stars>1-10 from iron_cache import * import unittest import requests class",
"100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item",
"unittest import requests class TestIronCache(unittest.TestCase): def setUp(self): self.cache = IronCache(\"test_cache\")",
"self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\")",
"self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1) if __name__ ==",
"item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1) if __name__ == '__main__': unittest.main()",
"\"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\",",
"item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\")",
"self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1) if",
"self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\",",
"be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError, self.cache.get, \"test_item\") def test_increment(self): self.cache.put(\"test_item\", 2)",
"def setUp(self): self.cache = IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\") item",
"= IronCache(\"test_cache\") def test_get(self): self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value,",
"= self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item",
"self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item =",
"3) self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def test_decrement(self):",
"def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99)",
"self.assertEqual(item.value, 3) self.cache.increment(\"test_item\", amount=42) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 45) def",
"amount=98) item = self.cache.get(\"test_item\") self.assertEqual(item.value, 1) if __name__ == '__main__':",
"45) def test_decrement(self): self.cache.put(\"test_item\", 100) self.cache.decrement(\"test_item\") item = self.cache.get(\"test_item\") self.assertEqual(item.value,",
"item = self.cache.get(\"test_item\") self.assertEqual(item.value, 99) self.cache.decrement(\"test_item\", amount=98) item = self.cache.get(\"test_item\")",
"* import unittest import requests class TestIronCache(unittest.TestCase): def setUp(self): self.cache",
"self.assertEqual(item.value, \"testing\") def test_delete(self): self.cache.put(\"test_item\", \"will be deleted\") self.cache.delete(\"test_item\") self.assertRaises(requests.exceptions.HTTPError,",
"test_get(self): self.cache.put(\"test_item\", \"testing\") item = self.cache.get(\"test_item\") self.assertEqual(item.value, \"testing\") def test_delete(self):"
] |
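A small sketch for running the suite above programmatically; it assumes valid iron.io credentials are configured, since every test talks to a live cache named "test_cache".

# Illustrative sketch only; requires IronCache credentials to be set up,
# otherwise the requests made by the tests above will fail.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestIronCache)
unittest.TextTestRunner(verbosity=2).run(suite)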
[
"not found!') # Call command line tool silently cmd =",
"bandStdStart) # Add results to the output list bandStats.append( (bandMin,",
"and # limitations under the License. # __END_LICENSE__ \"\"\" Basic",
"for each band bandStats = [] band = 0 while",
"numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1]) size = [numSamples, numLines]",
"= subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Statistics are",
"2.0 (the # \"License\"); you may not use this file",
"an RBGA image\"\"\" cmd = 'gdal_translate ' + inputPath +",
"# Administrator of the National Aeronautics and Space Administration. All",
"the text sizePos = textOutput.find('Size is') endPos = textOutput.find('\\n', sizePos+7)",
"an ISIS file, False otherwise.\"\"\" # Currently we treat all",
"working with images on disk. \"\"\" import sys, os, re,",
"if we did not find it # Now parse out",
"= textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=',",
"bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add",
"asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to",
"some image statistics from gdalinfo\"\"\" if not os.path.exists(imagePath): raise Exception('Image",
"results to the output list bandStats.append( (bandMin, bandMax, bandMean, bandStd)",
"limitations under the License. # __END_LICENSE__ \"\"\" Basic functions for",
"\"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd os.system(cmd) def getImageSize(imagePath):",
"utf-8 -*- # __BEGIN_LICENSE__ # Copyright (c) 2009-2013, United States",
"-co \"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd",
"def isIsisFile(filePath): \"\"\"Returns True if the file is an ISIS",
"use this file except in compliance with the # License.",
"textOutput, err = p.communicate() # Statistics are computed seperately for",
"= p.communicate() # Statistics are computed seperately for each band",
"' + str(band+1) + ' Block=' bandLoc = textOutput.find(bandString) if",
"bandLoc = textOutput.find(bandString) if bandLoc < 0: return bandStats #",
"States Government as represented by the # Administrator of the",
"the stats line for this band bandString = 'Band '",
"the size from the text sizePos = textOutput.find('Size is') endPos",
"the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"textOutput, err = p.communicate() # Extract the size from the",
"under the License is distributed on an \"AS IS\" BASIS,",
"\"\"\"Makes an RGB copy of an RBGA image\"\"\" cmd =",
"License for the specific language governing permissions and # limitations",
"int(sizeStrs[0]) numLines = int(sizeStrs[1]) size = [numSamples, numLines] return size",
") band = band + 1 # Move to the",
"= [] band = 0 while (True): # Loop until",
"extension as ISIS files extension = os.path.splitext(filePath)[1] return (extension ==",
"input file exists if not os.path.exists(imagePath): raise Exception('Image file '",
"err = p.communicate() # Statistics are computed seperately for each",
"sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines",
"an image\"\"\" # Make sure the input file exists if",
"bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to the",
"of bands # Look for the stats line for this",
"treat all files with .cub extension as ISIS files extension",
"statistics for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart =",
"sizePos = textOutput.find('Size is') endPos = textOutput.find('\\n', sizePos+7) sizeStr =",
"= os.path.splitext(filePath)[1] return (extension == '.cub') def getImageStats(imagePath): \"\"\"Obtains some",
"otherwise.\"\"\" # Currently we treat all files with .cub extension",
"bandMax, bandMean, bandStd) ) band = band + 1 #",
"< 0: return bandStats # Quit if we did not",
"cmd = 'gdal_translate ' + inputPath + ' ' +",
"textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples =",
"text sizePos = textOutput.find('Size is') endPos = textOutput.find('\\n', sizePos+7) sizeStr",
"it # Now parse out the statistics for this band",
"file except in compliance with the # License. You may",
"-b 3 -co \"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"'",
"software # distributed under the License is distributed on an",
"suppress the command output cmd = ['gdalinfo', imagePath] p =",
"' -b 1 -b 2 -b 3 -co \"COMPRESS=LZW\" -co",
"a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # #",
"License, Version 2.0 (the # \"License\"); you may not use",
"raise Exception('Image file ' + imagePath + ' not found!')",
"endPos = textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',')",
"\"\"\" Basic functions for working with images on disk. \"\"\"",
"errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB copy",
"except in compliance with the # License. You may obtain",
"getImageSize(imagePath): \"\"\"Returns the size [samples, lines] in an image\"\"\" #",
"with .cub extension as ISIS files extension = os.path.splitext(filePath)[1] return",
"0 while (True): # Loop until we run out of",
"is an ISIS file, False otherwise.\"\"\" # Currently we treat",
"in an image\"\"\" # Make sure the input file exists",
"int(sizeStrs[1]) size = [numSamples, numLines] return size def isIsisFile(filePath): \"\"\"Returns",
"import sys, os, re, subprocess, string, time, errno import asp_string_utils",
"with the # License. You may obtain a copy of",
"reserved. # # The NGT platform is licensed under the",
"Apache License, Version 2.0 (the # \"License\"); you may not",
"\"\"\"Obtains some image statistics from gdalinfo\"\"\" if not os.path.exists(imagePath): raise",
"os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the size [samples, lines] in an",
"images on disk. \"\"\" import sys, os, re, subprocess, string,",
"we did not find it # Now parse out the",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd =",
"= ['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err",
"National Aeronautics and Space Administration. All # rights reserved. #",
"for the specific language governing permissions and # limitations under",
"Loop until we run out of bands # Look for",
"to in writing, software # distributed under the License is",
"# Make sure the input file exists if not os.path.exists(imagePath):",
"= 0 while (True): # Loop until we run out",
"textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc)",
"# See the License for the specific language governing permissions",
"size = [numSamples, numLines] return size def isIsisFile(filePath): \"\"\"Returns True",
"' + inputPath + ' ' + outputPath + '",
"language governing permissions and # limitations under the License. #",
"or agreed to in writing, software # distributed under the",
"bandLoc < 0: return bandStats # Quit if we did",
"required by applicable law or agreed to in writing, software",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"copy of an RBGA image\"\"\" cmd = 'gdal_translate ' +",
"+ str(band+1) + ' Block=' bandLoc = textOutput.find(bandString) if bandLoc",
"return bandStats # Quit if we did not find it",
"size def isIsisFile(filePath): \"\"\"Returns True if the file is an",
"band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart",
"parse out the statistics for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=',",
"agreed to in writing, software # distributed under the License",
"output list bandStats.append( (bandMin, bandMax, bandMean, bandStd) ) band =",
"sure the input file exists if not os.path.exists(imagePath): raise Exception('Image",
"files extension = os.path.splitext(filePath)[1] return (extension == '.cub') def getImageStats(imagePath):",
"distributed under the License is distributed on an \"AS IS\"",
"2 -b 3 -co \"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co",
"= 'Band ' + str(band+1) + ' Block=' bandLoc =",
"United States Government as represented by the # Administrator of",
"' ' + outputPath + ' -b 1 -b 2",
"for working with images on disk. \"\"\" import sys, os,",
"+ outputPath + ' -b 1 -b 2 -b 3",
"found!') # Call command line tool silently cmd = ['gdalinfo',",
"imagePath + ' not found!') # Use subprocess to suppress",
"+ imagePath + ' not found!') # Call command line",
"= p.communicate() # Extract the size from the text sizePos",
"textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc)",
"= asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to the output list",
"= textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples",
"return (extension == '.cub') def getImageStats(imagePath): \"\"\"Obtains some image statistics",
"express or implied. # See the License for the specific",
"bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax",
"band bandString = 'Band ' + str(band+1) + ' Block='",
"string, time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an",
"stdout=subprocess.PIPE) textOutput, err = p.communicate() # Statistics are computed seperately",
"computed seperately for each band bandStats = [] band =",
"' Block=' bandLoc = textOutput.find(bandString) if bandLoc < 0: return",
"writing, software # distributed under the License is distributed on",
"for this band bandString = 'Band ' + str(band+1) +",
"you may not use this file except in compliance with",
"gdalinfo\"\"\" if not os.path.exists(imagePath): raise Exception('Image file ' + imagePath",
"= sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1]) size =",
"'.cub') def getImageStats(imagePath): \"\"\"Obtains some image statistics from gdalinfo\"\"\" if",
"[numSamples, numLines] return size def isIsisFile(filePath): \"\"\"Returns True if the",
"under the Apache License, Version 2.0 (the # \"License\"); you",
"seperately for each band bandStats = [] band = 0",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"-b 1 -b 2 -b 3 -co \"COMPRESS=LZW\" -co \"TILED=YES\"",
"-co \"BLOCKYSIZE=256\"' print cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the size",
"Aeronautics and Space Administration. All # rights reserved. # #",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"Administrator of the National Aeronautics and Space Administration. All #",
"\"\"\"Returns True if the file is an ISIS file, False",
"['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate()",
"Use subprocess to suppress the command output cmd = ['gdalinfo',",
"[] band = 0 while (True): # Loop until we",
"this band bandString = 'Band ' + str(band+1) + '",
"= textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput,",
"each band bandStats = [] band = 0 while (True):",
"Copyright (c) 2009-2013, United States Government as represented by the",
"# Quit if we did not find it # Now",
"== '.cub') def getImageStats(imagePath): \"\"\"Obtains some image statistics from gdalinfo\"\"\"",
"inputPath + ' ' + outputPath + ' -b 1",
"by the # Administrator of the National Aeronautics and Space",
"__END_LICENSE__ \"\"\" Basic functions for working with images on disk.",
"-co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd os.system(cmd) def",
"Space Administration. All # rights reserved. # # The NGT",
"Quit if we did not find it # Now parse",
"-*- # __BEGIN_LICENSE__ # Copyright (c) 2009-2013, United States Government",
"# The NGT platform is licensed under the Apache License,",
"band = 0 while (True): # Loop until we run",
"the command output cmd = ['gdalinfo', imagePath] p = subprocess.Popen(cmd,",
"image\"\"\" cmd = 'gdal_translate ' + inputPath + ' '",
"bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean",
"copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"stdout=subprocess.PIPE) textOutput, err = p.communicate() # Extract the size from",
"+ ' -b 1 -b 2 -b 3 -co \"COMPRESS=LZW\"",
"(bandMin, bandMax, bandMean, bandStd) ) band = band + 1",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"= textOutput.find('Size is') endPos = textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos]",
"# # The NGT platform is licensed under the Apache",
"the # License. You may obtain a copy of the",
"the License is distributed on an \"AS IS\" BASIS, #",
"bands # Look for the stats line for this band",
"all files with .cub extension as ISIS files extension =",
"= ['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err =",
"band = band + 1 # Move to the next",
"bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) #",
"textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart)",
"p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Statistics",
"governing permissions and # limitations under the License. # __END_LICENSE__",
"= subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Extract the",
"until we run out of bands # Look for the",
"sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0])",
"bandString = 'Band ' + str(band+1) + ' Block=' bandLoc",
"'Band ' + str(band+1) + ' Block=' bandLoc = textOutput.find(bandString)",
"textOutput.find('Size is') endPos = textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs",
"image statistics from gdalinfo\"\"\" if not os.path.exists(imagePath): raise Exception('Image file",
"law or agreed to in writing, software # distributed under",
"re, subprocess, string, time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath):",
"# __END_LICENSE__ \"\"\" Basic functions for working with images on",
"-co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns",
"sys, os, re, subprocess, string, time, errno import asp_string_utils def",
"# Now parse out the statistics for this band bandMaxStart",
"# Loop until we run out of bands # Look",
"not os.path.exists(imagePath): raise Exception('Image file ' + imagePath + '",
"(True): # Loop until we run out of bands #",
"imagePath + ' not found!') # Call command line tool",
"bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart =",
"numLines = int(sizeStrs[1]) size = [numSamples, numLines] return size def",
"textOutput.find(bandString) if bandLoc < 0: return bandStats # Quit if",
"file is an ISIS file, False otherwise.\"\"\" # Currently we",
"asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart)",
"= textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput,",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"command line tool silently cmd = ['gdalinfo', imagePath, '-stats'] p",
"an RGB copy of an RBGA image\"\"\" cmd = 'gdal_translate",
"out of bands # Look for the stats line for",
"may not use this file except in compliance with the",
"= [numSamples, numLines] return size def isIsisFile(filePath): \"\"\"Returns True if",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"did not find it # Now parse out the statistics",
"are computed seperately for each band bandStats = [] band",
"2009-2013, United States Government as represented by the # Administrator",
"the National Aeronautics and Space Administration. All # rights reserved.",
"Currently we treat all files with .cub extension as ISIS",
"License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"band bandStats = [] band = 0 while (True): #",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"and Space Administration. All # rights reserved. # # The",
"bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart =",
"from the text sizePos = textOutput.find('Size is') endPos = textOutput.find('\\n',",
"# Currently we treat all files with .cub extension as",
"' not found!') # Call command line tool silently cmd",
"size from the text sizePos = textOutput.find('Size is') endPos =",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"files with .cub extension as ISIS files extension = os.path.splitext(filePath)[1]",
"not use this file except in compliance with the #",
"subprocess, string, time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes",
"+ ' not found!') # Call command line tool silently",
"' + imagePath + ' not found!') # Call command",
"run out of bands # Look for the stats line",
"the # Administrator of the National Aeronautics and Space Administration.",
"# License. You may obtain a copy of the License",
"os.path.exists(imagePath): raise Exception('Image file ' + imagePath + ' not",
"is') endPos = textOutput.find('\\n', sizePos+7) sizeStr = textOutput[sizePos+7:endPos] sizeStrs =",
"subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Statistics are computed",
"stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB copy of an RBGA image\"\"\"",
"+ imagePath + ' not found!') # Use subprocess to",
"coding: utf-8 -*- # __BEGIN_LICENSE__ # Copyright (c) 2009-2013, United",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"command output cmd = ['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE)",
"= int(sizeStrs[0]) numLines = int(sizeStrs[1]) size = [numSamples, numLines] return",
"to the output list bandStats.append( (bandMin, bandMax, bandMean, bandStd) )",
"p.communicate() # Statistics are computed seperately for each band bandStats",
"+ inputPath + ' ' + outputPath + ' -b",
"or implied. # See the License for the specific language",
"False otherwise.\"\"\" # Currently we treat all files with .cub",
"output cmd = ['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput,",
"# \"License\"); you may not use this file except in",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"found!') # Use subprocess to suppress the command output cmd",
"\"License\"); you may not use this file except in compliance",
"out the statistics for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc)",
"line tool silently cmd = ['gdalinfo', imagePath, '-stats'] p =",
"with images on disk. \"\"\" import sys, os, re, subprocess,",
"file ' + imagePath + ' not found!') # Use",
"represented by the # Administrator of the National Aeronautics and",
"bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin",
"NGT platform is licensed under the Apache License, Version 2.0",
"silently cmd = ['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE)",
"(c) 2009-2013, United States Government as represented by the #",
"isIsisFile(filePath): \"\"\"Returns True if the file is an ISIS file,",
"find it # Now parse out the statistics for this",
"is licensed under the Apache License, Version 2.0 (the #",
"cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the size [samples, lines] in",
"the input file exists if not os.path.exists(imagePath): raise Exception('Image file",
"\"\"\" import sys, os, re, subprocess, string, time, errno import",
"License. # __END_LICENSE__ \"\"\" Basic functions for working with images",
"Basic functions for working with images on disk. \"\"\" import",
"Make sure the input file exists if not os.path.exists(imagePath): raise",
"extension = os.path.splitext(filePath)[1] return (extension == '.cub') def getImageStats(imagePath): \"\"\"Obtains",
"platform is licensed under the Apache License, Version 2.0 (the",
"Statistics are computed seperately for each band bandStats = []",
"if the file is an ISIS file, False otherwise.\"\"\" #",
"# # Unless required by applicable law or agreed to",
"bandStats # Quit if we did not find it #",
"Version 2.0 (the # \"License\"); you may not use this",
"sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1]) size = [numSamples,",
"file exists if not os.path.exists(imagePath): raise Exception('Image file ' +",
"= asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results",
"p.communicate() # Extract the size from the text sizePos =",
"may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0",
"'-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() #",
"# Look for the stats line for this band bandString",
"cmd = ['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput,",
"['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err =",
"if not os.path.exists(imagePath): raise Exception('Image file ' + imagePath +",
"disk. \"\"\" import sys, os, re, subprocess, string, time, errno",
"asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to the output list bandStats.append(",
"lines] in an image\"\"\" # Make sure the input file",
"implied. # See the License for the specific language governing",
"#!/usr/bin/env python # -*- coding: utf-8 -*- # __BEGIN_LICENSE__ #",
"on disk. \"\"\" import sys, os, re, subprocess, string, time,",
"permissions and # limitations under the License. # __END_LICENSE__ \"\"\"",
"' not found!') # Use subprocess to suppress the command",
"for the stats line for this band bandString = 'Band",
"of an RBGA image\"\"\" cmd = 'gdal_translate ' + inputPath",
"= textOutput.find(bandString) if bandLoc < 0: return bandStats # Quit",
"subprocess to suppress the command output cmd = ['gdalinfo', imagePath]",
"bandMean, bandStd) ) band = band + 1 # Move",
"not find it # Now parse out the statistics for",
"by applicable law or agreed to in writing, software #",
"= textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=',",
"under the License. # __END_LICENSE__ \"\"\" Basic functions for working",
"print cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the size [samples, lines]",
"+ ' not found!') # Use subprocess to suppress the",
"as represented by the # Administrator of the National Aeronautics",
"= textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines =",
"imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() #",
"1 -b 2 -b 3 -co \"COMPRESS=LZW\" -co \"TILED=YES\" -co",
"bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax =",
"bandStd) ) band = band + 1 # Move to",
"this file except in compliance with the # License. You",
"# Statistics are computed seperately for each band bandStats =",
"numLines] return size def isIsisFile(filePath): \"\"\"Returns True if the file",
"sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1]) size",
"err = p.communicate() # Extract the size from the text",
"of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"the size [samples, lines] in an image\"\"\" # Make sure",
"# limitations under the License. # __END_LICENSE__ \"\"\" Basic functions",
"cmd = ['gdalinfo', imagePath] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err",
"as ISIS files extension = os.path.splitext(filePath)[1] return (extension == '.cub')",
"stats line for this band bandString = 'Band ' +",
"Block=' bandLoc = textOutput.find(bandString) if bandLoc < 0: return bandStats",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"size [samples, lines] in an image\"\"\" # Make sure the",
"from gdalinfo\"\"\" if not os.path.exists(imagePath): raise Exception('Image file ' +",
"Unless required by applicable law or agreed to in writing,",
"we treat all files with .cub extension as ISIS files",
"' + outputPath + ' -b 1 -b 2 -b",
"os, re, subprocess, string, time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath,",
"we run out of bands # Look for the stats",
"time, errno import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB",
"imagePath, '-stats'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate()",
"the specific language governing permissions and # limitations under the",
"applicable law or agreed to in writing, software # distributed",
"Now parse out the statistics for this band bandMaxStart =",
"\"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd os.system(cmd)",
"= band + 1 # Move to the next band",
"# Copyright (c) 2009-2013, United States Government as represented by",
"# Extract the size from the text sizePos = textOutput.find('Size",
"Extract the size from the text sizePos = textOutput.find('Size is')",
"str(band+1) + ' Block=' bandLoc = textOutput.find(bandString) if bandLoc <",
"def getImageSize(imagePath): \"\"\"Returns the size [samples, lines] in an image\"\"\"",
"in writing, software # distributed under the License is distributed",
"0: return bandStats # Quit if we did not find",
"# Add results to the output list bandStats.append( (bandMin, bandMax,",
"(the # \"License\"); you may not use this file except",
"The NGT platform is licensed under the Apache License, Version",
"import asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB copy of",
"rights reserved. # # The NGT platform is licensed under",
"list bandStats.append( (bandMin, bandMax, bandMean, bandStd) ) band = band",
"= int(sizeStrs[1]) size = [numSamples, numLines] return size def isIsisFile(filePath):",
"\"\"\"Returns the size [samples, lines] in an image\"\"\" # Make",
"= 'gdal_translate ' + inputPath + ' ' + outputPath",
"file, False otherwise.\"\"\" # Currently we treat all files with",
"functions for working with images on disk. \"\"\" import sys,",
"p = subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Extract",
"RBGA image\"\"\" cmd = 'gdal_translate ' + inputPath + '",
"subprocess.Popen(cmd, stdout=subprocess.PIPE) textOutput, err = p.communicate() # Extract the size",
"+ ' ' + outputPath + ' -b 1 -b",
"= asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput,",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"(extension == '.cub') def getImageStats(imagePath): \"\"\"Obtains some image statistics from",
"python # -*- coding: utf-8 -*- # __BEGIN_LICENSE__ # Copyright",
"outputPath + ' -b 1 -b 2 -b 3 -co",
"def getImageStats(imagePath): \"\"\"Obtains some image statistics from gdalinfo\"\"\" if not",
"bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd",
"True if the file is an ISIS file, False otherwise.\"\"\"",
"ISIS files extension = os.path.splitext(filePath)[1] return (extension == '.cub') def",
"' + imagePath + ' not found!') # Use subprocess",
"Exception('Image file ' + imagePath + ' not found!') #",
"# Use subprocess to suppress the command output cmd =",
"\"BLOCKYSIZE=256\"' print cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the size [samples,",
"the file is an ISIS file, False otherwise.\"\"\" # Currently",
"the License for the specific language governing permissions and #",
"asp_string_utils def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB copy of an",
"3 -co \"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print",
"getImageStats(imagePath): \"\"\"Obtains some image statistics from gdalinfo\"\"\" if not os.path.exists(imagePath):",
"Look for the stats line for this band bandString =",
"either express or implied. # See the License for the",
"the output list bandStats.append( (bandMin, bandMax, bandMean, bandStd) ) band",
"not found!') # Use subprocess to suppress the command output",
"asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart) bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart)",
"image\"\"\" # Make sure the input file exists if not",
"to suppress the command output cmd = ['gdalinfo', imagePath] p",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"textOutput[sizePos+7:endPos] sizeStrs = sizeStr.strip().split(',') numSamples = int(sizeStrs[0]) numLines = int(sizeStrs[1])",
"textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart)",
"obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #",
"tool silently cmd = ['gdalinfo', imagePath, '-stats'] p = subprocess.Popen(cmd,",
"+ ' Block=' bandLoc = textOutput.find(bandString) if bandLoc < 0:",
"# -*- coding: utf-8 -*- # __BEGIN_LICENSE__ # Copyright (c)",
"line for this band bandString = 'Band ' + str(band+1)",
"# Call command line tool silently cmd = ['gdalinfo', imagePath,",
"[samples, lines] in an image\"\"\" # Make sure the input",
"the License. # __END_LICENSE__ \"\"\" Basic functions for working with",
"at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"the Apache License, Version 2.0 (the # \"License\"); you may",
"# rights reserved. # # The NGT platform is licensed",
"RGB copy of an RBGA image\"\"\" cmd = 'gdal_translate '",
"licensed under the Apache License, Version 2.0 (the # \"License\");",
"bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc) bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc) bandStdStart",
"bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin =",
"return size def isIsisFile(filePath): \"\"\"Returns True if the file is",
"bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc) bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean =",
"-b 2 -b 3 -co \"COMPRESS=LZW\" -co \"TILED=YES\" -co \"BLOCKXSIZE=256\"",
"statistics from gdalinfo\"\"\" if not os.path.exists(imagePath): raise Exception('Image file '",
"\"BLOCKXSIZE=256\" -co \"BLOCKYSIZE=256\"' print cmd os.system(cmd) def getImageSize(imagePath): \"\"\"Returns the",
"this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc)",
"while (True): # Loop until we run out of bands",
"def stripRgbImageAlphaChannel(inputPath, outputPath): \"\"\"Makes an RGB copy of an RBGA",
"__BEGIN_LICENSE__ # Copyright (c) 2009-2013, United States Government as represented",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"# __BEGIN_LICENSE__ # Copyright (c) 2009-2013, United States Government as",
"Government as represented by the # Administrator of the National",
"bandStats.append( (bandMin, bandMax, bandMean, bandStd) ) band = band +",
"os.path.splitext(filePath)[1] return (extension == '.cub') def getImageStats(imagePath): \"\"\"Obtains some image",
"# distributed under the License is distributed on an \"AS",
"Call command line tool silently cmd = ['gdalinfo', imagePath, '-stats']",
"# Unless required by applicable law or agreed to in",
".cub extension as ISIS files extension = os.path.splitext(filePath)[1] return (extension",
"All # rights reserved. # # The NGT platform is",
"the statistics for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart",
"-*- coding: utf-8 -*- # __BEGIN_LICENSE__ # Copyright (c) 2009-2013,",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"compliance with the # License. You may obtain a copy",
"License. You may obtain a copy of the License at",
"file ' + imagePath + ' not found!') # Call",
"You may obtain a copy of the License at #",
"bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart) # Add results to the output",
"Administration. All # rights reserved. # # The NGT platform",
"outputPath): \"\"\"Makes an RGB copy of an RBGA image\"\"\" cmd",
"Add results to the output list bandStats.append( (bandMin, bandMax, bandMean,",
"exists if not os.path.exists(imagePath): raise Exception('Image file ' + imagePath",
"in compliance with the # License. You may obtain a",
"if bandLoc < 0: return bandStats # Quit if we",
"bandStats = [] band = 0 while (True): # Loop",
"'gdal_translate ' + inputPath + ' ' + outputPath +",
"= asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart) bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart) bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput,",
"of the National Aeronautics and Space Administration. All # rights",
"for this band bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc) bandMeanStart = textOutput.find('STATISTICS_MEAN=',",
"ISIS file, False otherwise.\"\"\" # Currently we treat all files"
] |
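The record above shingles a single Python module of GDAL-based image helpers. For readability, a reconstruction assembled from the overlapping fragments follows (the Apache-2.0 license header is omitted). The fragments use the Python 2 print statement; the print() call and the universal_newlines flag below are my Python 3 adaptations, and asp_string_utils is the external helper module the original imports.

#!/usr/bin/env python3
"""Basic functions for working with images on disk (reconstructed from the fragments above)."""
import os
import subprocess

import asp_string_utils  # external helper; provides getNumberAfterEqualSign()


def stripRgbImageAlphaChannel(inputPath, outputPath):
    """Makes an RGB copy of an RGBA image."""
    cmd = ('gdal_translate ' + inputPath + ' ' + outputPath +
           ' -b 1 -b 2 -b 3 -co "COMPRESS=LZW" -co "TILED=YES"'
           ' -co "BLOCKXSIZE=256" -co "BLOCKYSIZE=256"')
    print(cmd)  # the fragments use the Python 2 print statement here
    os.system(cmd)


def getImageSize(imagePath):
    """Returns the size [samples, lines] of an image."""
    # Make sure the input file exists
    if not os.path.exists(imagePath):
        raise Exception('Image file ' + imagePath + ' not found!')
    # Use subprocess to suppress the command output
    cmd = ['gdalinfo', imagePath]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = p.communicate()
    # Extract the size from the "Size is <samples>, <lines>" line
    sizePos = textOutput.find('Size is')
    endPos = textOutput.find('\n', sizePos + 7)
    sizeStrs = textOutput[sizePos + 7:endPos].strip().split(',')
    return [int(sizeStrs[0]), int(sizeStrs[1])]


def isIsisFile(filePath):
    """Returns True if the file is an ISIS file, False otherwise."""
    # Currently we treat all files with .cub extension as ISIS files
    extension = os.path.splitext(filePath)[1]
    return extension == '.cub'


def getImageStats(imagePath):
    """Obtains some image statistics from gdalinfo."""
    if not os.path.exists(imagePath):
        raise Exception('Image file ' + imagePath + ' not found!')
    # Call command line tool silently
    cmd = ['gdalinfo', imagePath, '-stats']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
    textOutput, err = p.communicate()
    # Statistics are computed separately for each band
    bandStats = []
    band = 0
    while True:  # Loop until we run out of bands
        # Look for the stats line for this band
        bandString = 'Band ' + str(band + 1) + ' Block='
        bandLoc = textOutput.find(bandString)
        if bandLoc < 0:
            return bandStats  # Quit if we did not find it
        # Now parse out the statistics for this band
        bandMaxStart = textOutput.find('STATISTICS_MAXIMUM=', bandLoc)
        bandMeanStart = textOutput.find('STATISTICS_MEAN=', bandLoc)
        bandMinStart = textOutput.find('STATISTICS_MINIMUM=', bandLoc)
        bandStdStart = textOutput.find('STATISTICS_STDDEV=', bandLoc)
        bandMax = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMaxStart)
        bandMean = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMeanStart)
        bandMin = asp_string_utils.getNumberAfterEqualSign(textOutput, bandMinStart)
        bandStd = asp_string_utils.getNumberAfterEqualSign(textOutput, bandStdStart)
        # Add results to the output list and move to the next band
        bandStats.append((bandMin, bandMax, bandMean, bandStd))
        band = band + 1

getImageStats() returns one (min, max, mean, stddev) tuple per band; the fragments invoke gdalinfo with -stats so that the STATISTICS_* metadata keys are actually present in the output being parsed.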
[
"argparse DELIMITER = \"\\t\" def merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename,",
"x[i], y[i]]) + \"\\n\") if __name__ == '__main__': parser =",
"for genotypes_line, gq_line in zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER) y",
"\" f\"expected the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4])",
"lines in the files are in the correct order. if",
"__name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ')",
"as merged: # Integrity check: do the files have same",
"== '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout')",
"y[0:4]: raise ValueError(f\"The lines in the files are not in",
"genotypes, open(gq_filename, \"r\") as gq, open(merged_filename, \"w\") as merged: #",
"same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if not",
"n_cols = len(gq_header) for genotypes_line, gq_line in zip(genotypes, gq): x",
"DELIMITER = \"\\t\" def merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename, \"r\")",
"to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for i in range(4, n_cols):",
"files do not have same number/order of columns\") n_cols =",
"the same order; \" f\"expected the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\")",
"gq_line in zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER)",
"len(gq_header) for genotypes_line, gq_line in zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER)",
"argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args = parser.parse_args() merge(args.genotypes,",
"= len(gq_header) for genotypes_line, gq_line in zip(genotypes, gq): x =",
"DELIMITER.join(x[0:4]) for i in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]])",
"import argparse DELIMITER = \"\\t\" def merge(genotypes_filename, gq_filename, merged_filename): with",
"if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes')",
"not genotypes_header == gq_header: raise ValueError(\"The files do not have",
"== gq_header: raise ValueError(\"The files do not have same number/order",
"#!/usr/bin/env python import argparse DELIMITER = \"\\t\" def merge(genotypes_filename, gq_filename,",
"in zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) #",
"\"r\") as gq, open(merged_filename, \"w\") as merged: # Integrity check:",
"= genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) # Check if lines in",
"in the correct order. if not x[0:4] == y[0:4]: raise",
"raise ValueError(f\"The lines in the files are not in the",
"columns\") n_cols = len(gq_header) for genotypes_line, gq_line in zip(genotypes, gq):",
"open(gq_filename, \"r\") as gq, open(merged_filename, \"w\") as merged: # Integrity",
"gq_filename, merged_filename): with open(genotypes_filename, \"r\") as genotypes, open(gq_filename, \"r\") as",
"# Check if lines in the files are in the",
"are not in the same order; \" f\"expected the following",
"correct order. if not x[0:4] == y[0:4]: raise ValueError(f\"The lines",
"genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if not genotypes_header == gq_header: raise",
"gq): x = genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) # Check if",
"lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for i in range(4,",
"n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + \"\\n\") if __name__ ==",
"= DELIMITER.join(x[0:4]) for i in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i],",
"the files are in the correct order. if not x[0:4]",
"lines in the files are not in the same order;",
"columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if not genotypes_header",
"merged: # Integrity check: do the files have same columns?",
"check: do the files have same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER)",
"following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for i in",
"formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args = parser.parse_args() merge(args.genotypes, args.GQ, args.fout)",
"for i in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) +",
"in the same order; \" f\"expected the following lines to",
"gq, open(merged_filename, \"w\") as merged: # Integrity check: do the",
"description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args = parser.parse_args() merge(args.genotypes, args.GQ,",
"= \"\\t\" def merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename, \"r\") as",
"+ \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__,",
"do not have same number/order of columns\") n_cols = len(gq_header)",
"parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args =",
"are in the correct order. if not x[0:4] == y[0:4]:",
"Check if lines in the files are in the correct",
"genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) # Check if lines in the",
"not in the same order; \" f\"expected the following lines",
"gq_header[i], x[i], y[i]]) + \"\\n\") if __name__ == '__main__': parser",
"gq_header = gq.readline().rstrip().split(DELIMITER) if not genotypes_header == gq_header: raise ValueError(\"The",
"= argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args = parser.parse_args()",
"if lines in the files are in the correct order.",
"== y[0:4]: raise ValueError(f\"The lines in the files are not",
"files are not in the same order; \" f\"expected the",
"not x[0:4] == y[0:4]: raise ValueError(f\"The lines in the files",
"gq.readline().rstrip().split(DELIMITER) if not genotypes_header == gq_header: raise ValueError(\"The files do",
"order; \" f\"expected the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h =",
"merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + \"\\n\") if __name__ == '__main__':",
"as genotypes, open(gq_filename, \"r\") as gq, open(merged_filename, \"w\") as merged:",
"ValueError(\"The files do not have same number/order of columns\") n_cols",
"x = genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) # Check if lines",
"i in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + \"\\n\")",
"as gq, open(merged_filename, \"w\") as merged: # Integrity check: do",
"gq_header: raise ValueError(\"The files do not have same number/order of",
"with open(genotypes_filename, \"r\") as genotypes, open(gq_filename, \"r\") as gq, open(merged_filename,",
"ValueError(f\"The lines in the files are not in the same",
"# Integrity check: do the files have same columns? genotypes_header",
"number/order of columns\") n_cols = len(gq_header) for genotypes_line, gq_line in",
"\"w\") as merged: # Integrity check: do the files have",
"of columns\") n_cols = len(gq_header) for genotypes_line, gq_line in zip(genotypes,",
"genotypes_line, gq_line in zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER) y =",
"'__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('genotypes') parser.add_argument('GQ') parser.add_argument('fout') args",
"files are in the correct order. if not x[0:4] ==",
"\"\\t\" def merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename, \"r\") as genotypes,",
"in the files are in the correct order. if not",
"do the files have same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header",
"order. if not x[0:4] == y[0:4]: raise ValueError(f\"The lines in",
"= gq_line.rstrip().split(DELIMITER) # Check if lines in the files are",
"h = DELIMITER.join(x[0:4]) for i in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i],",
"the files have same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header =",
"the correct order. if not x[0:4] == y[0:4]: raise ValueError(f\"The",
"Integrity check: do the files have same columns? genotypes_header =",
"y[i]]) + \"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser(",
"raise ValueError(\"The files do not have same number/order of columns\")",
"= genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if not genotypes_header == gq_header:",
"not have same number/order of columns\") n_cols = len(gq_header) for",
"genotypes_header == gq_header: raise ValueError(\"The files do not have same",
"if not genotypes_header == gq_header: raise ValueError(\"The files do not",
"the files are not in the same order; \" f\"expected",
"range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + \"\\n\") if __name__",
"match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for i in range(4, n_cols): merged.write(DELIMITER.join([h,",
"open(merged_filename, \"w\") as merged: # Integrity check: do the files",
"same number/order of columns\") n_cols = len(gq_header) for genotypes_line, gq_line",
"merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename, \"r\") as genotypes, open(gq_filename, \"r\")",
"x[0:4] == y[0:4]: raise ValueError(f\"The lines in the files are",
"if not x[0:4] == y[0:4]: raise ValueError(f\"The lines in the",
"= gq.readline().rstrip().split(DELIMITER) if not genotypes_header == gq_header: raise ValueError(\"The files",
"python import argparse DELIMITER = \"\\t\" def merge(genotypes_filename, gq_filename, merged_filename):",
"\"\\n\") if __name__ == '__main__': parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)",
"in range(4, n_cols): merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + \"\\n\") if",
"gq_line.rstrip().split(DELIMITER) # Check if lines in the files are in",
"open(genotypes_filename, \"r\") as genotypes, open(gq_filename, \"r\") as gq, open(merged_filename, \"w\")",
"same order; \" f\"expected the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h",
"files have same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER)",
"have same number/order of columns\") n_cols = len(gq_header) for genotypes_line,",
"\"r\") as genotypes, open(gq_filename, \"r\") as gq, open(merged_filename, \"w\") as",
"merged_filename): with open(genotypes_filename, \"r\") as genotypes, open(gq_filename, \"r\") as gq,",
"def merge(genotypes_filename, gq_filename, merged_filename): with open(genotypes_filename, \"r\") as genotypes, open(gq_filename,",
"y = gq_line.rstrip().split(DELIMITER) # Check if lines in the files",
"in the files are not in the same order; \"",
"zip(genotypes, gq): x = genotypes_line.rstrip().split(DELIMITER) y = gq_line.rstrip().split(DELIMITER) # Check",
"f\"expected the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for",
"have same columns? genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if",
"genotypes_header = genotypes.readline().rstrip().split(DELIMITER) gq_header = gq.readline().rstrip().split(DELIMITER) if not genotypes_header ==",
"the following lines to match.\\n{x[0:4]}\\n{y[0:4]}\") h = DELIMITER.join(x[0:4]) for i"
] |
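The second record shingles a small tab-separated merge script for a genotypes table and a matching GQ table. A reconstruction in source order follows; the module docstring is a placeholder of mine (the fragments pass __doc__ to argparse, but its text is not visible in them).

#!/usr/bin/env python
"""Merge a genotypes table with a GQ table that shares the same columns."""  # placeholder docstring, not in the fragments
import argparse

DELIMITER = "\t"


def merge(genotypes_filename, gq_filename, merged_filename):
    with open(genotypes_filename, "r") as genotypes, \
         open(gq_filename, "r") as gq, \
         open(merged_filename, "w") as merged:
        # Integrity check: do the files have same columns?
        genotypes_header = genotypes.readline().rstrip().split(DELIMITER)
        gq_header = gq.readline().rstrip().split(DELIMITER)
        if not genotypes_header == gq_header:
            raise ValueError("The files do not have same number/order of columns")
        n_cols = len(gq_header)
        for genotypes_line, gq_line in zip(genotypes, gq):
            x = genotypes_line.rstrip().split(DELIMITER)
            y = gq_line.rstrip().split(DELIMITER)
            # Check if lines in the files are in the correct order.
            if not x[0:4] == y[0:4]:
                raise ValueError(f"The lines in the files are not in the same order; "
                                 f"expected the following lines to match.\n{x[0:4]}\n{y[0:4]}")
            # Columns 0-3 form the shared row key checked above.
            h = DELIMITER.join(x[0:4])
            for i in range(4, n_cols):
                merged.write(DELIMITER.join([h, gq_header[i], x[i], y[i]]) + "\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('genotypes')
    parser.add_argument('GQ')
    parser.add_argument('fout')
    args = parser.parse_args()
    merge(args.genotypes, args.GQ, args.fout)

Invoked as, for example, python merge.py genotypes.tsv GQ.tsv out.tsv (file names illustrative), it writes one output row per data column beyond the four-column key, joining that key with the column header and the corresponding values from both input files.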
[] |
[
"= \"\" count = 0 for slist in ttp: if",
"') j += 1 mydb = my_client['TwoRolless'] mycol = mydb['sns']",
"> 0): pic.append(media[0]['media_url']) else: pic.append(\"\") j = 0 while j",
"\"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep 30 seconds\") time.sleep(30) print(\"sleep",
"0 for tweet in tweets: flag = tweet.full_text.find(findtag) if flag",
"\"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\",",
"= tweet.entities.get('media', []) if (len(media) > 0): pic.append(media[0]['media_url']) else: pic.append(\"\")",
"= tweet.full_text.split(\"https://\") gong = \"\" count = 0 for slist",
"break x = mycol.insert_one( { \"tag\": findtag, \"time\": snsTime[k], \"text\":",
"snsList[k], \"img\": pic[k], \"url\": url[k] } ) conn_str = \"\"",
"in range(0, len(snsList)): if k == 15: break x =",
"if __name__ == '__main__': while True: print(\"cycles start\") mydb =",
"0 for slist in ttp: if count == (len(ttp) -",
"while True: print(\"cycles start\") mydb = my_client['TwoRolless'] mycol = mydb['sns']",
"url.append(tmp) i += 1 media = tweet.entities.get('media', []) if (len(media)",
"range(0, len(snsList)): if k == 15: break x = mycol.insert_one(",
"tweepy.API(auth) def crawllTwit(snsname, findtag): account = snsname tweets = api.user_timeline(screen_name=account,",
"'>') snsList[j] = snsList[j].replace('▶️', ' ⇒ ') j += 1",
"len(snsList): if j == 10: break snsList[j] = snsList[j].replace('<', '<')",
"if j == 10: break snsList[j] = snsList[j].replace('<', '<') snsList[j]",
"== '__main__': while True: print(\"cycles start\") mydb = my_client['TwoRolless'] mycol",
"= OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname,",
"= \"\" twitter_consumer_secret = \"\" twitter_access_token = \"\" twitter_access_secret =",
"= mycol.insert_one( { \"tag\": findtag, \"time\": snsTime[k], \"text\": snsList[k], \"img\":",
"findtag, \"time\": snsTime[k], \"text\": snsList[k], \"img\": pic[k], \"url\": url[k] }",
"k == 15: break x = mycol.insert_one( { \"tag\": findtag,",
"tweet.full_text.find(findtag) if flag >= 0: ttp = tweet.full_text.split(\"https://\") gong =",
"tweet_mode='extended') snsList = [] snsTime = [] url = []",
"count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = [] snsTime = []",
"snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1 media =",
"crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\")",
"ttp = tweet.full_text.split(\"https://\") gong = \"\" count = 0 for",
"twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname, findtag): account = snsname",
"= tweet.full_text.find(findtag) if flag >= 0: ttp = tweet.full_text.split(\"https://\") gong",
"[] url = [] pic = [] i = 0",
"= 0 while j < len(snsList): if j == 10:",
"\"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth)",
"== 15: break x = mycol.insert_one( { \"tag\": findtag, \"time\":",
"pymongo import MongoClient from pymongo.cursor import CursorType twitter_consumer_key = \"\"",
"\"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\",",
"import CursorType twitter_consumer_key = \"\" twitter_consumer_secret = \"\" twitter_access_token =",
"break snsList[j] = snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>', '>') snsList[j]",
"⇒ ') j += 1 mydb = my_client['TwoRolless'] mycol =",
"f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1 media = tweet.entities.get('media', []) if",
"- 1): break gong = gong + slist count +=",
"snsList = [] snsTime = [] url = [] pic",
"== (len(ttp) - 1): break gong = gong + slist",
"\"time\": snsTime[k], \"text\": snsList[k], \"img\": pic[k], \"url\": url[k] } )",
"+ slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\"",
"slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp)",
"True: print(\"cycles start\") mydb = my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({})",
"\"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\",",
"} ) conn_str = \"\" my_client = pymongo.MongoClient(conn_str) if __name__",
"url[k] } ) conn_str = \"\" my_client = pymongo.MongoClient(conn_str) if",
"crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\")",
"j += 1 mydb = my_client['TwoRolless'] mycol = mydb['sns'] for",
"\"\" count = 0 for slist in ttp: if count",
"tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = []",
"= \"\" twitter_access_secret = \"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token,",
"15: break x = mycol.insert_one( { \"tag\": findtag, \"time\": snsTime[k],",
"i += 1 media = tweet.entities.get('media', []) if (len(media) >",
"= gong + slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp",
"crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\")",
"crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\")",
"mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\")",
"tweet.full_text.split(\"https://\") gong = \"\" count = 0 for slist in",
"tweepy import traceback import time import pymongo from tweepy import",
"= [] url = [] pic = [] i =",
"\"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle",
"pic.append(\"\") j = 0 while j < len(snsList): if j",
"0: ttp = tweet.full_text.split(\"https://\") gong = \"\" count = 0",
"j = 0 while j < len(snsList): if j ==",
"[] pic = [] i = 0 for tweet in",
"tweepy import OAuthHandler from pymongo import MongoClient from pymongo.cursor import",
"j < len(snsList): if j == 10: break snsList[j] =",
"auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname, findtag): account =",
"= my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\")",
"crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep 30 seconds\") time.sleep(30)",
"mydb['sns'] for k in range(0, len(snsList)): if k == 15:",
"snsList[j] = snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️', ' ⇒ ')",
"my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\",",
"snsList[j] = snsList[j].replace('▶️', ' ⇒ ') j += 1 mydb",
"snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList =",
"count = 0 for slist in ttp: if count ==",
"= f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1 media = tweet.entities.get('media', [])",
"+= 1 media = tweet.entities.get('media', []) if (len(media) > 0):",
"\"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep 30 seconds\")",
"in ttp: if count == (len(ttp) - 1): break gong",
"len(snsList)): if k == 15: break x = mycol.insert_one( {",
"mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\",",
"tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1 media = tweet.entities.get('media',",
"print(\"cycles start\") mydb = my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\",",
"mycol.insert_one( { \"tag\": findtag, \"time\": snsTime[k], \"text\": snsList[k], \"img\": pic[k],",
"\"tag\": findtag, \"time\": snsTime[k], \"text\": snsList[k], \"img\": pic[k], \"url\": url[k]",
"mydb = my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\",",
"if count == (len(ttp) - 1): break gong = gong",
"mydb = my_client['TwoRolless'] mycol = mydb['sns'] for k in range(0,",
"k in range(0, len(snsList)): if k == 15: break x",
"OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname, findtag):",
"= mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\",",
") conn_str = \"\" my_client = pymongo.MongoClient(conn_str) if __name__ ==",
"exclude_replies=True, tweet_mode='extended') snsList = [] snsTime = [] url =",
"\"\" my_client = pymongo.MongoClient(conn_str) if __name__ == '__main__': while True:",
"pymongo from tweepy import OAuthHandler from pymongo import MongoClient from",
"crawllTwit(snsname, findtag): account = snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False,",
"= snsList[j].replace('▶️', ' ⇒ ') j += 1 mydb =",
"\"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\",",
"import time import pymongo from tweepy import OAuthHandler from pymongo",
"+= 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i +=",
"tweet in tweets: flag = tweet.full_text.find(findtag) if flag >= 0:",
"tweets: flag = tweet.full_text.find(findtag) if flag >= 0: ttp =",
"10: break snsList[j] = snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>', '>')",
"flag >= 0: ttp = tweet.full_text.split(\"https://\") gong = \"\" count",
"slist in ttp: if count == (len(ttp) - 1): break",
"crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\")",
"= mydb['sns'] for k in range(0, len(snsList)): if k ==",
"if flag >= 0: ttp = tweet.full_text.split(\"https://\") gong = \"\"",
"crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep 30",
"from pymongo.cursor import CursorType twitter_consumer_key = \"\" twitter_consumer_secret = \"\"",
"OAuthHandler from pymongo import MongoClient from pymongo.cursor import CursorType twitter_consumer_key",
">= 0: ttp = tweet.full_text.split(\"https://\") gong = \"\" count =",
"= my_client['TwoRolless'] mycol = mydb['sns'] for k in range(0, len(snsList)):",
"twitter_consumer_key = \"\" twitter_consumer_secret = \"\" twitter_access_token = \"\" twitter_access_secret",
"api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = [] snsTime =",
"= [] i = 0 for tweet in tweets: flag",
"0 while j < len(snsList): if j == 10: break",
"1 mydb = my_client['TwoRolless'] mycol = mydb['sns'] for k in",
"my_client['TwoRolless'] mycol = mydb['sns'] for k in range(0, len(snsList)): if",
"snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️', '",
"\"url\": url[k] } ) conn_str = \"\" my_client = pymongo.MongoClient(conn_str)",
"crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\")",
"snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1 media",
"= \"\" twitter_access_token = \"\" twitter_access_secret = \"\" auth =",
"findtag): account = snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True,",
"= [] pic = [] i = 0 for tweet",
"import MongoClient from pymongo.cursor import CursorType twitter_consumer_key = \"\" twitter_consumer_secret",
"media = tweet.entities.get('media', []) if (len(media) > 0): pic.append(media[0]['media_url']) else:",
"'<') snsList[j] = snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️', ' ⇒",
"\"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\",",
"url = [] pic = [] i = 0 for",
"pymongo.cursor import CursorType twitter_consumer_key = \"\" twitter_consumer_secret = \"\" twitter_access_token",
"\"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\",",
"twitter_consumer_secret = \"\" twitter_access_token = \"\" twitter_access_secret = \"\" auth",
"\"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\", \"은하철도의\") crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep",
"crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\")",
"for slist in ttp: if count == (len(ttp) - 1):",
"= pymongo.MongoClient(conn_str) if __name__ == '__main__': while True: print(\"cycles start\")",
"\"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\",",
"if k == 15: break x = mycol.insert_one( { \"tag\":",
"conn_str = \"\" my_client = pymongo.MongoClient(conn_str) if __name__ == '__main__':",
"import tweepy import traceback import time import pymongo from tweepy",
"twitter_access_secret = \"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api",
"account = snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended')",
"= snsname tweets = api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList",
"tweet.entities.get('media', []) if (len(media) > 0): pic.append(media[0]['media_url']) else: pic.append(\"\") j",
"'__main__': while True: print(\"cycles start\") mydb = my_client['TwoRolless'] mycol =",
"auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def",
"== 10: break snsList[j] = snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>',",
"count == (len(ttp) - 1): break gong = gong +",
"crawllTwit(\"@Insight_Since96\", \"뱀파이어아더\") print(\"cycle end\") print(\"sleep 30 seconds\") time.sleep(30) print(\"sleep end\")",
"\"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\") crawllTwit(\"@doublek_ent\",",
"twitter_access_token = \"\" twitter_access_secret = \"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret)",
"import pymongo from tweepy import OAuthHandler from pymongo import MongoClient",
"time import pymongo from tweepy import OAuthHandler from pymongo import",
"\"\" twitter_access_secret = \"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret)",
"snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️', ' ⇒ ') j +=",
"traceback import time import pymongo from tweepy import OAuthHandler from",
"snsTime = [] url = [] pic = [] i",
"gong = \"\" count = 0 for slist in ttp:",
"1): break gong = gong + slist count += 1",
"crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\")",
"[] i = 0 for tweet in tweets: flag =",
"(len(media) > 0): pic.append(media[0]['media_url']) else: pic.append(\"\") j = 0 while",
"my_client = pymongo.MongoClient(conn_str) if __name__ == '__main__': while True: print(\"cycles",
"snsTime[k], \"text\": snsList[k], \"img\": pic[k], \"url\": url[k] } ) conn_str",
"1 media = tweet.entities.get('media', []) if (len(media) > 0): pic.append(media[0]['media_url'])",
"\"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\",",
"CursorType twitter_consumer_key = \"\" twitter_consumer_secret = \"\" twitter_access_token = \"\"",
"gong + slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp =",
"j == 10: break snsList[j] = snsList[j].replace('<', '<') snsList[j] =",
"\"img\": pic[k], \"url\": url[k] } ) conn_str = \"\" my_client",
"crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\")",
"flag = tweet.full_text.find(findtag) if flag >= 0: ttp = tweet.full_text.split(\"https://\")",
"\"text\": snsList[k], \"img\": pic[k], \"url\": url[k] } ) conn_str =",
"import OAuthHandler from pymongo import MongoClient from pymongo.cursor import CursorType",
"break gong = gong + slist count += 1 snsList.append(gong)",
"snsList[j] = snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>', '>') snsList[j] =",
"ttp: if count == (len(ttp) - 1): break gong =",
"for k in range(0, len(snsList)): if k == 15: break",
"= \"\" my_client = pymongo.MongoClient(conn_str) if __name__ == '__main__': while",
"in tweets: flag = tweet.full_text.find(findtag) if flag >= 0: ttp",
"mycol = mydb['sns'] for k in range(0, len(snsList)): if k",
"= 0 for tweet in tweets: flag = tweet.full_text.find(findtag) if",
"mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\")",
"crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\")",
"gong = gong + slist count += 1 snsList.append(gong) snsTime.append(tweet.created_at)",
"= \"\" auth = OAuthHandler(twitter_consumer_key, twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api =",
"pic = [] i = 0 for tweet in tweets:",
"\"\" twitter_consumer_secret = \"\" twitter_access_token = \"\" twitter_access_secret = \"\"",
"x = mycol.insert_one( { \"tag\": findtag, \"time\": snsTime[k], \"text\": snsList[k],",
"\"더라스트맨\") crawllTwit(\"@Musical_NarGold\", \"나르치스와_골드문트\") crawllTwit(\"@rndworks\", \"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\",",
"\"\" twitter_access_token = \"\" twitter_access_secret = \"\" auth = OAuthHandler(twitter_consumer_key,",
"= snsList[j].replace('<', '<') snsList[j] = snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️',",
"pic[k], \"url\": url[k] } ) conn_str = \"\" my_client =",
"(len(ttp) - 1): break gong = gong + slist count",
"start\") mydb = my_client['TwoRolless'] mycol = mydb['sns'] mycol.remove({}) crawllTwit(\"@m_thelastman\", \"더라스트맨\")",
"pymongo.MongoClient(conn_str) if __name__ == '__main__': while True: print(\"cycles start\") mydb",
"api = tweepy.API(auth) def crawllTwit(snsname, findtag): account = snsname tweets",
"i = 0 for tweet in tweets: flag = tweet.full_text.find(findtag)",
"{ \"tag\": findtag, \"time\": snsTime[k], \"text\": snsList[k], \"img\": pic[k], \"url\":",
"= [] snsTime = [] url = [] pic =",
"else: pic.append(\"\") j = 0 while j < len(snsList): if",
"def crawllTwit(snsname, findtag): account = snsname tweets = api.user_timeline(screen_name=account, count=100,",
"__name__ == '__main__': while True: print(\"cycles start\") mydb = my_client['TwoRolless']",
"< len(snsList): if j == 10: break snsList[j] = snsList[j].replace('<',",
"MongoClient from pymongo.cursor import CursorType twitter_consumer_key = \"\" twitter_consumer_secret =",
"count += 1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i",
"' ⇒ ') j += 1 mydb = my_client['TwoRolless'] mycol",
"crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\")",
"include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = [] snsTime = [] url",
"import traceback import time import pymongo from tweepy import OAuthHandler",
"for tweet in tweets: flag = tweet.full_text.find(findtag) if flag >=",
"[]) if (len(media) > 0): pic.append(media[0]['media_url']) else: pic.append(\"\") j =",
"from pymongo import MongoClient from pymongo.cursor import CursorType twitter_consumer_key =",
"0): pic.append(media[0]['media_url']) else: pic.append(\"\") j = 0 while j <",
"= snsList[j].replace('>', '>') snsList[j] = snsList[j].replace('▶️', ' ⇒ ') j",
"\"더데빌\") crawllTwit(\"@ninestory9\", \"엘리펀트송\") crawllTwit(\"@companyrang\", \"쿠로이저택엔누가살고있을까\") crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\",",
"while j < len(snsList): if j == 10: break snsList[j]",
"= 0 for slist in ttp: if count == (len(ttp)",
"= api.user_timeline(screen_name=account, count=100, include_rts=False, exclude_replies=True, tweet_mode='extended') snsList = [] snsTime",
"pic.append(media[0]['media_url']) else: pic.append(\"\") j = 0 while j < len(snsList):",
"[] snsTime = [] url = [] pic = []",
"= tweepy.API(auth) def crawllTwit(snsname, findtag): account = snsname tweets =",
"twitter_consumer_secret) auth.set_access_token(twitter_access_token, twitter_access_secret) api = tweepy.API(auth) def crawllTwit(snsname, findtag): account",
"1 snsList.append(gong) snsTime.append(tweet.created_at) tmp = f\"https://twitter.com/{tweet.user.screen_name}/status/{tweet.id}\" url.append(tmp) i += 1",
"crawllTwit(\"@companyrang\", \"난쟁이들\") crawllTwit(\"@page1company\", \"곤투모로우\") crawllTwit(\"@HONGcompany\", \"더모먼트\") crawllTwit(\"@orchardmusical\", \"칠칠\") crawllTwit(\"@livecorp2011\", \"팬레터\")",
"if (len(media) > 0): pic.append(media[0]['media_url']) else: pic.append(\"\") j = 0",
"from tweepy import OAuthHandler from pymongo import MongoClient from pymongo.cursor",
"+= 1 mydb = my_client['TwoRolless'] mycol = mydb['sns'] for k",
"crawllTwit(\"@livecorp2011\", \"팬레터\") crawllTwit(\"@shownote\", \"젠틀맨스가이드\") crawllTwit(\"@od_musical\", \"지킬앤하이드\") crawllTwit(\"@kontentz\", \"엔딩노트\") crawllTwit(\"@i_seensee\", \"빌리\")",
"snsList[j].replace('▶️', ' ⇒ ') j += 1 mydb = my_client['TwoRolless']"
] |
[
"private key associated with your service account in Privacy Enhanced",
"# The private key associated with your service account in",
"your Google contact. # Set up a service account as",
"the RSA format # (.p12 suffix) to .pem, run the",
"as described in the README. EE_ACCOUNT = '<EMAIL>' # The",
"a service account as described in the README. EE_ACCOUNT =",
"email address authorized by your Google contact. # Set up",
"# Email format (.pem suffix). To convert a private key",
"up a service account as described in the README. EE_ACCOUNT",
"ee # The service account email address authorized by your",
"your service account in Privacy Enhanced # Email format (.pem",
"run the openssl command like this: # openssl pkcs12 -in",
"# (.p12 suffix) to .pem, run the openssl command like",
"in Privacy Enhanced # Email format (.pem suffix). To convert",
"-in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE = 'privatekey.pem' EE_CREDENTIALS",
"downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE = 'privatekey.pem' EE_CREDENTIALS =",
"in the README. EE_ACCOUNT = '<EMAIL>' # The private key",
"the openssl command like this: # openssl pkcs12 -in downloaded-privatekey.p12",
"(.pem suffix). To convert a private key from the RSA",
"from the RSA format # (.p12 suffix) to .pem, run",
"service account email address authorized by your Google contact. #",
"openssl command like this: # openssl pkcs12 -in downloaded-privatekey.p12 -nodes",
"openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE =",
"README. EE_ACCOUNT = '<EMAIL>' # The private key associated with",
"private key from the RSA format # (.p12 suffix) to",
"#!/usr/bin/env python \"\"\"Handles Earth Engine service account configuration.\"\"\" import ee",
"convert a private key from the RSA format # (.p12",
"python \"\"\"Handles Earth Engine service account configuration.\"\"\" import ee #",
"like this: # openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts >",
"address authorized by your Google contact. # Set up a",
"# Set up a service account as described in the",
"suffix) to .pem, run the openssl command like this: #",
"The private key associated with your service account in Privacy",
"= '<EMAIL>' # The private key associated with your service",
"Set up a service account as described in the README.",
"account email address authorized by your Google contact. # Set",
"associated with your service account in Privacy Enhanced # Email",
"suffix). To convert a private key from the RSA format",
"service account configuration.\"\"\" import ee # The service account email",
"To convert a private key from the RSA format #",
"pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE = 'privatekey.pem'",
"format (.pem suffix). To convert a private key from the",
"\"\"\"Handles Earth Engine service account configuration.\"\"\" import ee # The",
"-nocerts > privatekey.pem EE_PRIVATE_KEY_FILE = 'privatekey.pem' EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT, EE_PRIVATE_KEY_FILE)",
"EE_ACCOUNT = '<EMAIL>' # The private key associated with your",
"with your service account in Privacy Enhanced # Email format",
"key from the RSA format # (.p12 suffix) to .pem,",
"-nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE = 'privatekey.pem' EE_CREDENTIALS = ee.ServiceAccountCredentials(EE_ACCOUNT,",
"account configuration.\"\"\" import ee # The service account email address",
"configuration.\"\"\" import ee # The service account email address authorized",
"by your Google contact. # Set up a service account",
"service account as described in the README. EE_ACCOUNT = '<EMAIL>'",
"service account in Privacy Enhanced # Email format (.pem suffix).",
"# The service account email address authorized by your Google",
"a private key from the RSA format # (.p12 suffix)",
"to .pem, run the openssl command like this: # openssl",
".pem, run the openssl command like this: # openssl pkcs12",
"Engine service account configuration.\"\"\" import ee # The service account",
"account as described in the README. EE_ACCOUNT = '<EMAIL>' #",
"Enhanced # Email format (.pem suffix). To convert a private",
"authorized by your Google contact. # Set up a service",
"'<EMAIL>' # The private key associated with your service account",
"import ee # The service account email address authorized by",
"the README. EE_ACCOUNT = '<EMAIL>' # The private key associated",
"RSA format # (.p12 suffix) to .pem, run the openssl",
"key associated with your service account in Privacy Enhanced #",
"described in the README. EE_ACCOUNT = '<EMAIL>' # The private",
"The service account email address authorized by your Google contact.",
"Earth Engine service account configuration.\"\"\" import ee # The service",
"this: # openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem",
"Google contact. # Set up a service account as described",
"format # (.p12 suffix) to .pem, run the openssl command",
"command like this: # openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts",
"contact. # Set up a service account as described in",
"# openssl pkcs12 -in downloaded-privatekey.p12 -nodes -nocerts > privatekey.pem EE_PRIVATE_KEY_FILE",
"(.p12 suffix) to .pem, run the openssl command like this:",
"Privacy Enhanced # Email format (.pem suffix). To convert a",
"Email format (.pem suffix). To convert a private key from",
"account in Privacy Enhanced # Email format (.pem suffix). To"
] |
[
"Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\") # Conversions print(\"Conversions between",
"# Simple arithmetic print(\"Addition and Subtraction\") print(1 + 2) print(48",
"print(\"\") # Python has a real number type called float",
"expressions in Python \"\"\" # Unary + and - print(\"Unary",
"2) + 8)) print((-3) ** 2) \"\"\" Demonstration of the",
"print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\")",
"print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\")",
"+ 2) print(48 - 89) print(3.45 + 2.7) print(87.3384 -",
"precedence\") print(7 + 3 * 5) print(5.5 * 6 //",
"5) print(5.5 * ((6 // 2) + 8)) print((-3) **",
"print(\"Addition and Subtraction\") print(1 + 2) print(48 - 89) print(3.45",
"/ 14.3) print(8 // 2) print(3 // 2) print(7.538 //",
"9) print(\"fahrenheit:\", float(temperature)) # You can assign a different value",
"print(3.45 + 2.7) print(87.3384 - 12.35) print(3 + 6.7) print(9.8",
"+ 2.7) print(87.3384 - 12.35) print(3 + 6.7) print(9.8 -",
"26 print(\"new value:\", temperature) # Multiple variables can be used",
"of numbers in Python \"\"\" # Python has an integer",
"can include multiple operations print(\"Compound expressions\") print(3 + 5 +",
"# The = operator can be used to assign values",
"+ 8)) print((-3) ** 2) \"\"\" Demonstration of the use",
"to them. \"\"\" # The = operator can be used",
"print(\"\") # Operator precedence defines how expressions are evaluated print(\"Operator",
"# Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") #",
"used in arbitrary expressions offset = 32 multiplier = 5.0",
"int print(\"int\") print(\"---\") print(0) print(1) print(-3) print(70383028364830) print(\"\") # Python",
"2 + 8) print(-3 ** 2) print(\"\") # Use parentheses",
"change evaluation order print(\"Grouping with parentheses\") print((7 + 3) *",
"Subtraction\") print(1 + 2) print(48 - 89) print(3.45 + 2.7)",
"print(\"fahrenheit:\", float(temperature)) # You can assign a different value to",
"** 2) \"\"\" Demonstration of the use of variables and",
"print(-3348.63) print(\"\") # Simple arithmetic print(\"Addition and Subtraction\") print(1 +",
"arithmetic print(\"Addition and Subtraction\") print(1 + 2) print(48 - 89)",
"print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of simple arithmetic",
"can be used in arbitrary expressions offset = 32 multiplier",
"different value to an existing variable temperature = 26 print(\"new",
"compound arithmetic expressions in Python \"\"\" # Expressions can include",
"same precedence are evaluated from left to right print(18 -",
"\"\"\" Demonstration of compound arithmetic expressions in Python \"\"\" #",
"Demonstration of compound arithmetic expressions in Python \"\"\" # Expressions",
"14.3) print(8 // 2) print(3 // 2) print(7.538 // 14.3)",
"Operator precedence defines how expressions are evaluated print(\"Operator precedence\") print(7",
"# Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential",
"5.0 / 9.0 celsius = (temperature - offset) * multiplier",
"has a real number type called float print(\"float\") print(\"-----\") print(0.0)",
"* 5 / 9) print(\"fahrenheit:\", float(temperature)) # You can assign",
"integer type called int print(\"int\") print(\"---\") print(0) print(1) print(-3) print(70383028364830)",
"print(7.538 / 14.3) print(8 // 2) print(3 // 2) print(7.538",
"Demonstration of the use of variables and how to assign",
"// 2 + 8) print(-3 ** 2) print(\"\") # Use",
"in Python \"\"\" # Python has an integer type called",
"be used to assign values to variables bakers_dozen = 12",
"assign a different value to an existing variable temperature =",
"print(-3) print(70383028364830) print(\"\") # Python has a real number type",
"1 temperature = 93 # Variables can be used as",
"print(8 // 2) print(3 // 2) print(7.538 // 14.3) print(\"\")",
"called int print(\"int\") print(\"---\") print(0) print(1) print(-3) print(70383028364830) print(\"\") #",
"print(\"\") # Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") #",
"* 5) print(5.5 * ((6 // 2) + 8)) print((-3)",
"to assign values to them. \"\"\" # The = operator",
"and in expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature - 32) *",
"# Conversions print(\"Conversions between numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0))",
"print(-5) print(+7.86) print(-3348.63) print(\"\") # Simple arithmetic print(\"Addition and Subtraction\")",
"6 + 4) print(\"\") # Operator precedence defines how expressions",
"and how to assign values to them. \"\"\" # The",
"= 5.0 / 9.0 celsius = (temperature - offset) *",
"multiplier = 5.0 / 9.0 celsius = (temperature - offset)",
"= 12 + 1 temperature = 93 # Variables can",
"print(32.6 ** 7) print(9 ** 0.5) \"\"\" Demonstration of compound",
"5) print(5.5 * 6 // 2 + 8) print(-3 **",
"has an integer type called int print(\"int\") print(\"---\") print(0) print(1)",
"# Python has a real number type called float print(\"float\")",
"print(1 + 2) print(48 - 89) print(3.45 + 2.7) print(87.3384",
"2.7) print(87.3384 - 12.35) print(3 + 6.7) print(9.8 - 4)",
"** 2) print(5 ** 4) print(32.6 ** 7) print(9 **",
"arithmetic expressions in Python \"\"\" # Expressions can include multiple",
"* ((6 // 2) + 8)) print((-3) ** 2) \"\"\"",
"arbitrary expressions offset = 32 multiplier = 5.0 / 9.0",
"+ 7 + 27) #Operator with same precedence are evaluated",
"- 32) * 5 / 9) print(\"fahrenheit:\", float(temperature)) # You",
"print(7 * 8.2) print(\"\") print(\"Division\") print(8 / 2) print(3 /",
"+ 3) * 5) print(5.5 * ((6 // 2) +",
"# Operator precedence defines how expressions are evaluated print(\"Operator precedence\")",
"((6 // 2) + 8)) print((-3) ** 2) \"\"\" Demonstration",
"print(0) print(1) print(-3) print(70383028364830) print(\"\") # Python has a real",
"14.3) print(\"\") print(\"Exponentiation\") print(3 ** 2) print(5 ** 4) print(32.6",
"5 + 7 + 27) #Operator with same precedence are",
"= 93 # Variables can be used as values and",
"real number type called float print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2)",
"27) #Operator with same precedence are evaluated from left to",
"precedence are evaluated from left to right print(18 - 6",
"print(7.8 * 27.54) print(7 * 8.2) print(\"\") print(\"Division\") print(8 /",
"print(\"\") print(\"Exponentiation\") print(3 ** 2) print(5 ** 4) print(32.6 **",
"7 + 27) #Operator with same precedence are evaluated from",
"* 2) print(7.8 * 27.54) print(7 * 8.2) print(\"\") print(\"Division\")",
"the use of variables and how to assign values to",
"- 12.35) print(3 + 6.7) print(9.8 - 4) print(\"\") print(\"Multiplication\")",
"print(9.8 - 4) print(\"\") print(\"Multiplication\") print(3 * 2) print(7.8 *",
"# Multiple variables can be used in arbitrary expressions offset",
"print((7 + 3) * 5) print(5.5 * ((6 // 2)",
"to right print(18 - 6 + 4) print(\"\") # Operator",
"bakers_dozen = 12 + 1 temperature = 93 # Variables",
"You can assign a different value to an existing variable",
"print((-3) ** 2) \"\"\" Demonstration of the use of variables",
"32) * 5 / 9) print(\"fahrenheit:\", float(temperature)) # You can",
"print(\"Conversions between numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7))",
"6 // 2 + 8) print(-3 ** 2) print(\"\") #",
"variables and how to assign values to them. \"\"\" #",
"- 4) print(\"\") print(\"Multiplication\") print(3 * 2) print(7.8 * 27.54)",
"/ 2) print(7.538 / 14.3) print(8 // 2) print(3 //",
"Multiple variables can be used in arbitrary expressions offset =",
"operations print(\"Compound expressions\") print(3 + 5 + 7 + 27)",
"\"\"\" Demonstration of the use of variables and how to",
"2) print(7.538 / 14.3) print(8 // 2) print(3 // 2)",
"print(\"\") # Use parentheses to change evaluation order print(\"Grouping with",
"print(18 - 6 + 4) print(\"\") # Operator precedence defines",
"print(3 // 2) print(7.538 // 14.3) print(\"\") print(\"Exponentiation\") print(3 **",
"can be used as values and in expressions print(temperature, bakers_dozen)",
"= 26 print(\"new value:\", temperature) # Multiple variables can be",
"left to right print(18 - 6 + 4) print(\"\") #",
"# Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\") # Conversions print(\"Conversions",
"print(\"\") # Conversions print(\"Conversions between numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999))",
"called float print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\") # Limited",
"of simple arithmetic expressions in Python \"\"\" # Unary +",
"print(\"Multiplication\") print(3 * 2) print(7.8 * 27.54) print(7 * 8.2)",
"print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\") # Simple arithmetic print(\"Addition and",
"4) print(\"\") # Operator precedence defines how expressions are evaluated",
"4) print(32.6 ** 7) print(9 ** 0.5) \"\"\" Demonstration of",
"be used as values and in expressions print(temperature, bakers_dozen) print(\"celsius:\",",
"print(\"\") # Simple arithmetic print(\"Addition and Subtraction\") print(1 + 2)",
"- 89) print(3.45 + 2.7) print(87.3384 - 12.35) print(3 +",
"// 2) print(3 // 2) print(7.538 // 14.3) print(\"\") print(\"Exponentiation\")",
"be used in arbitrary expressions offset = 32 multiplier =",
"- print(\"Unary operators\") print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\") # Simple",
"93 # Variables can be used as values and in",
"expressions are evaluated print(\"Operator precedence\") print(7 + 3 * 5)",
"and - print(\"Unary operators\") print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\") #",
"with parentheses\") print((7 + 3) * 5) print(5.5 * ((6",
"print(5 ** 4) print(32.6 ** 7) print(9 ** 0.5) \"\"\"",
"simple arithmetic expressions in Python \"\"\" # Unary + and",
"Python \"\"\" # Unary + and - print(\"Unary operators\") print(+3)",
"to an existing variable temperature = 26 print(\"new value:\", temperature)",
"print(1) print(-3) print(70383028364830) print(\"\") # Python has a real number",
"in Python \"\"\" # Unary + and - print(\"Unary operators\")",
"value to an existing variable temperature = 26 print(\"new value:\",",
"assign values to them. \"\"\" # The = operator can",
"The = operator can be used to assign values to",
"2) print(5 ** 4) print(32.6 ** 7) print(9 ** 0.5)",
"* 5) print(5.5 * 6 // 2 + 8) print(-3",
"print(1e500) print(-1e500) print(\"\") # Conversions print(\"Conversions between numeric types\") print(\"---------------------------------\")",
"in Python \"\"\" # Expressions can include multiple operations print(\"Compound",
"+ 5 + 7 + 27) #Operator with same precedence",
"* 6 // 2 + 8) print(-3 ** 2) print(\"\")",
"celsius = (temperature - offset) * multiplier print(\"celsius value:\", celsius)",
"4) print(\"\") print(\"Multiplication\") print(3 * 2) print(7.8 * 27.54) print(7",
"print(\"Unary operators\") print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\") # Simple arithmetic",
"are evaluated from left to right print(18 - 6 +",
"+ 1 temperature = 93 # Variables can be used",
"temperature) # Multiple variables can be used in arbitrary expressions",
"9.0 celsius = (temperature - offset) * multiplier print(\"celsius value:\",",
"print(8 / 2) print(3 / 2) print(7.538 / 14.3) print(8",
"** 7) print(9 ** 0.5) \"\"\" Demonstration of compound arithmetic",
"# Python has an integer type called int print(\"int\") print(\"---\")",
"89) print(3.45 + 2.7) print(87.3384 - 12.35) print(3 + 6.7)",
"/ 2) print(3 / 2) print(7.538 / 14.3) print(8 //",
"print(\"celsius:\", (temperature - 32) * 5 / 9) print(\"fahrenheit:\", float(temperature))",
"Demonstration of numbers in Python \"\"\" # Python has an",
"expressions\") print(3 + 5 + 7 + 27) #Operator with",
"precedence defines how expressions are evaluated print(\"Operator precedence\") print(7 +",
"print(\"Compound expressions\") print(3 + 5 + 7 + 27) #Operator",
"notation print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\")",
"+ 8) print(-3 ** 2) print(\"\") # Use parentheses to",
"print(temperature, bakers_dozen) print(\"celsius:\", (temperature - 32) * 5 / 9)",
"an existing variable temperature = 26 print(\"new value:\", temperature) #",
"print(87.3384 - 12.35) print(3 + 6.7) print(9.8 - 4) print(\"\")",
"values to them. \"\"\" # The = operator can be",
"# Expressions can include multiple operations print(\"Compound expressions\") print(3 +",
"Demonstration of simple arithmetic expressions in Python \"\"\" # Unary",
"/ 9.0 celsius = (temperature - offset) * multiplier print(\"celsius",
"print(\"\") print(\"Multiplication\") print(3 * 2) print(7.8 * 27.54) print(7 *",
"print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\") # Conversions print(\"Conversions between numeric",
"number type called float print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\")",
"of variables and how to assign values to them. \"\"\"",
"offset = 32 multiplier = 5.0 / 9.0 celsius =",
"# Variables can be used as values and in expressions",
"numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration",
"print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of simple arithmetic expressions in",
"Variables can be used as values and in expressions print(temperature,",
"+ 4) print(\"\") # Operator precedence defines how expressions are",
"Conversions print(\"Conversions between numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7))",
"print(7.35) print(-43.2) print(\"\") # Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678)",
"+ and - print(\"Unary operators\") print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\")",
"arithmetic expressions in Python \"\"\" # Unary + and -",
"Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity",
"\"\"\" # The = operator can be used to assign",
"can assign a different value to an existing variable temperature",
"expressions in Python \"\"\" # Expressions can include multiple operations",
"Python has a real number type called float print(\"float\") print(\"-----\")",
"print(\"\") # Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\") # Conversions",
"evaluated from left to right print(18 - 6 + 4)",
"\"\"\" Demonstration of numbers in Python \"\"\" # Python has",
"print(1.23456789012345678) print(\"\") # Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9)",
"print(\"\") # Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\")",
"Python \"\"\" # Expressions can include multiple operations print(\"Compound expressions\")",
"print(\"Exponentiation\") print(3 ** 2) print(5 ** 4) print(32.6 ** 7)",
"print(-1e500) print(\"\") # Conversions print(\"Conversions between numeric types\") print(\"---------------------------------\") print(float(3))",
"print(7 + 3 * 5) print(5.5 * 6 // 2",
"variables bakers_dozen = 12 + 1 temperature = 93 #",
"Unary + and - print(\"Unary operators\") print(+3) print(-5) print(+7.86) print(-3348.63)",
"32 multiplier = 5.0 / 9.0 celsius = (temperature -",
"assign values to variables bakers_dozen = 12 + 1 temperature",
"(temperature - 32) * 5 / 9) print(\"fahrenheit:\", float(temperature)) #",
"are evaluated print(\"Operator precedence\") print(7 + 3 * 5) print(5.5",
"can be used to assign values to variables bakers_dozen =",
"8.2) print(\"\") print(\"Division\") print(8 / 2) print(3 / 2) print(7.538",
"value:\", temperature) # Multiple variables can be used in arbitrary",
"temperature = 26 print(\"new value:\", temperature) # Multiple variables can",
"float print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\") # Limited precision",
"\"\"\" Demonstration of simple arithmetic expressions in Python \"\"\" #",
"print(\"--------\") print(1e500) print(-1e500) print(\"\") # Conversions print(\"Conversions between numeric types\")",
"print(0.0) print(7.35) print(-43.2) print(\"\") # Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773)",
"// 14.3) print(\"\") print(\"Exponentiation\") print(3 ** 2) print(5 ** 4)",
"2) print(3 // 2) print(7.538 // 14.3) print(\"\") print(\"Exponentiation\") print(3",
"values to variables bakers_dozen = 12 + 1 temperature =",
"print(int(-3.7)) \"\"\" Demonstration of simple arithmetic expressions in Python \"\"\"",
"print(5.5 * ((6 // 2) + 8)) print((-3) ** 2)",
"print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\") # Limited precision print(\"Precision\")",
"between numeric types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\"",
"to assign values to variables bakers_dozen = 12 + 1",
"from left to right print(18 - 6 + 4) print(\"\")",
"precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential notation print(\"Scientific",
"+ 3 * 5) print(5.5 * 6 // 2 +",
"print(3 ** 2) print(5 ** 4) print(32.6 ** 7) print(9",
"print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500)",
"in arbitrary expressions offset = 32 multiplier = 5.0 /",
"# You can assign a different value to an existing",
"// 2) + 8)) print((-3) ** 2) \"\"\" Demonstration of",
"float(temperature)) # You can assign a different value to an",
"Simple arithmetic print(\"Addition and Subtraction\") print(1 + 2) print(48 -",
"print(9 ** 0.5) \"\"\" Demonstration of compound arithmetic expressions in",
"of the use of variables and how to assign values",
"evaluation order print(\"Grouping with parentheses\") print((7 + 3) * 5)",
"2) print(48 - 89) print(3.45 + 2.7) print(87.3384 - 12.35)",
"used to assign values to variables bakers_dozen = 12 +",
"to variables bakers_dozen = 12 + 1 temperature = 93",
"** 2) print(\"\") # Use parentheses to change evaluation order",
"print(\"int\") print(\"---\") print(0) print(1) print(-3) print(70383028364830) print(\"\") # Python has",
"# Unary + and - print(\"Unary operators\") print(+3) print(-5) print(+7.86)",
"# Use parentheses to change evaluation order print(\"Grouping with parentheses\")",
"2) print(\"\") # Use parentheses to change evaluation order print(\"Grouping",
"/ 9) print(\"fahrenheit:\", float(temperature)) # You can assign a different",
"numbers in Python \"\"\" # Python has an integer type",
"= operator can be used to assign values to variables",
"8) print(-3 ** 2) print(\"\") # Use parentheses to change",
"\"\"\" # Python has an integer type called int print(\"int\")",
"2) print(3 / 2) print(7.538 / 14.3) print(8 // 2)",
"** 4) print(32.6 ** 7) print(9 ** 0.5) \"\"\" Demonstration",
"print(48 - 89) print(3.45 + 2.7) print(87.3384 - 12.35) print(3",
"#Operator with same precedence are evaluated from left to right",
"2) print(7.8 * 27.54) print(7 * 8.2) print(\"\") print(\"Division\") print(8",
"print(5.5 * 6 // 2 + 8) print(-3 ** 2)",
"and Subtraction\") print(1 + 2) print(48 - 89) print(3.45 +",
"existing variable temperature = 26 print(\"new value:\", temperature) # Multiple",
"print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of simple arithmetic expressions",
"parentheses to change evaluation order print(\"Grouping with parentheses\") print((7 +",
"parentheses\") print((7 + 3) * 5) print(5.5 * ((6 //",
"print(-43.2) print(\"\") # Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\")",
"3) * 5) print(5.5 * ((6 // 2) + 8))",
"Python \"\"\" # Python has an integer type called int",
"a different value to an existing variable temperature = 26",
"print(\"Division\") print(8 / 2) print(3 / 2) print(7.538 / 14.3)",
"6.7) print(9.8 - 4) print(\"\") print(\"Multiplication\") print(3 * 2) print(7.8",
"them. \"\"\" # The = operator can be used to",
"* 8.2) print(\"\") print(\"Division\") print(8 / 2) print(3 / 2)",
"print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\") # Limited precision print(\"Precision\") print(\"---------\")",
"Expressions can include multiple operations print(\"Compound expressions\") print(3 + 5",
"print(-3 ** 2) print(\"\") # Use parentheses to change evaluation",
"operators\") print(+3) print(-5) print(+7.86) print(-3348.63) print(\"\") # Simple arithmetic print(\"Addition",
"+ 6.7) print(9.8 - 4) print(\"\") print(\"Multiplication\") print(3 * 2)",
"2) \"\"\" Demonstration of the use of variables and how",
"temperature = 93 # Variables can be used as values",
"print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential notation print(\"Scientific notation\")",
"how to assign values to them. \"\"\" # The =",
"+ 27) #Operator with same precedence are evaluated from left",
"variable temperature = 26 print(\"new value:\", temperature) # Multiple variables",
"defines how expressions are evaluated print(\"Operator precedence\") print(7 + 3",
"print(3 + 6.7) print(9.8 - 4) print(\"\") print(\"Multiplication\") print(3 *",
"print(\"Grouping with parentheses\") print((7 + 3) * 5) print(5.5 *",
"print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of simple arithmetic expressions in Python",
"use of variables and how to assign values to them.",
"include multiple operations print(\"Compound expressions\") print(3 + 5 + 7",
"2) print(7.538 // 14.3) print(\"\") print(\"Exponentiation\") print(3 ** 2) print(5",
"print(\"---\") print(0) print(1) print(-3) print(70383028364830) print(\"\") # Python has a",
"Use parentheses to change evaluation order print(\"Grouping with parentheses\") print((7",
"notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\") print(\"--------\") print(1e500)",
"to change evaluation order print(\"Grouping with parentheses\") print((7 + 3)",
"print(\"Scientific notation\") print(\"-------------------\") print(5e32) print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\") print(\"--------\")",
"7) print(9 ** 0.5) \"\"\" Demonstration of compound arithmetic expressions",
"** 0.5) \"\"\" Demonstration of compound arithmetic expressions in Python",
"8)) print((-3) ** 2) \"\"\" Demonstration of the use of",
"print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of simple",
"* 27.54) print(7 * 8.2) print(\"\") print(\"Division\") print(8 / 2)",
"expressions offset = 32 multiplier = 5.0 / 9.0 celsius",
"values and in expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature - 32)",
"order print(\"Grouping with parentheses\") print((7 + 3) * 5) print(5.5",
"27.54) print(7 * 8.2) print(\"\") print(\"Division\") print(8 / 2) print(3",
"\"\"\" # Unary + and - print(\"Unary operators\") print(+3) print(-5)",
"Python has an integer type called int print(\"int\") print(\"---\") print(0)",
"print(\"Operator precedence\") print(7 + 3 * 5) print(5.5 * 6",
"12.35) print(3 + 6.7) print(9.8 - 4) print(\"\") print(\"Multiplication\") print(3",
"\"\"\" # Expressions can include multiple operations print(\"Compound expressions\") print(3",
"expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature - 32) * 5 /",
"print(\"\") print(\"Division\") print(8 / 2) print(3 / 2) print(7.538 /",
"right print(18 - 6 + 4) print(\"\") # Operator precedence",
"0.5) \"\"\" Demonstration of compound arithmetic expressions in Python \"\"\"",
"with same precedence are evaluated from left to right print(18",
"evaluated print(\"Operator precedence\") print(7 + 3 * 5) print(5.5 *",
"print(999999999999999999999999999999999999999.9) print(\"\") # Infinity print(\"Infinity\") print(\"--------\") print(1e500) print(-1e500) print(\"\") #",
"as values and in expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature -",
"5 / 9) print(\"fahrenheit:\", float(temperature)) # You can assign a",
"Limited precision print(\"Precision\") print(\"---------\") print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential notation",
"// 2) print(7.538 // 14.3) print(\"\") print(\"Exponentiation\") print(3 ** 2)",
"multiple operations print(\"Compound expressions\") print(3 + 5 + 7 +",
"3 * 5) print(5.5 * 6 // 2 + 8)",
"variables can be used in arbitrary expressions offset = 32",
"- 6 + 4) print(\"\") # Operator precedence defines how",
"type called int print(\"int\") print(\"---\") print(0) print(1) print(-3) print(70383028364830) print(\"\")",
"= 32 multiplier = 5.0 / 9.0 celsius = (temperature",
"type called float print(\"float\") print(\"-----\") print(0.0) print(7.35) print(-43.2) print(\"\") #",
"an integer type called int print(\"int\") print(\"---\") print(0) print(1) print(-3)",
"print(4.56372883832331773) print(1.23456789012345678) print(\"\") # Scientific/exponential notation print(\"Scientific notation\") print(\"-------------------\") print(5e32)",
"of compound arithmetic expressions in Python \"\"\" # Expressions can",
"in expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature - 32) * 5",
"bakers_dozen) print(\"celsius:\", (temperature - 32) * 5 / 9) print(\"fahrenheit:\",",
"types\") print(\"---------------------------------\") print(float(3)) print(float(99999999999999999999999999999999999999)) print(int(3.0)) print(int(3.7)) print(int(-3.7)) \"\"\" Demonstration of",
"used as values and in expressions print(temperature, bakers_dozen) print(\"celsius:\", (temperature",
"12 + 1 temperature = 93 # Variables can be",
"print(3 * 2) print(7.8 * 27.54) print(7 * 8.2) print(\"\")",
"a real number type called float print(\"float\") print(\"-----\") print(0.0) print(7.35)",
"print(3 + 5 + 7 + 27) #Operator with same",
"how expressions are evaluated print(\"Operator precedence\") print(7 + 3 *",
"print(\"new value:\", temperature) # Multiple variables can be used in",
"operator can be used to assign values to variables bakers_dozen",
"print(7.538 // 14.3) print(\"\") print(\"Exponentiation\") print(3 ** 2) print(5 **",
"print(3 / 2) print(7.538 / 14.3) print(8 // 2) print(3",
"print(+7.86) print(-3348.63) print(\"\") # Simple arithmetic print(\"Addition and Subtraction\") print(1",
"print(70383028364830) print(\"\") # Python has a real number type called"
] |
[
"i current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending",
"not None: # self.dynamic_reaction[:, i] = self._compute_reaction() # update results",
"reaction when user wants it # moved reaction computation to",
"Solver class LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme, dt, comp_model, initial_conditions,",
"computation to dynamic analysis level # AK . this doesnt",
"class LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme, dt, comp_model, initial_conditions, force,",
"# TODO: only calculate reaction when user wants it #",
"def __init__(self, array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model): super().__init__(array_time,",
"i] = self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:, i] =",
"solve(self): # time loop for i in range(0, len(self.array_time)): self.step",
"reaction computed in dynamic analysis # TODO: only calculate reaction",
"dt, comp_model, initial_conditions, force, structure_model) def _print_solver_info(self): print(\"Linear Solver\") def",
"import Solver class LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme, dt, comp_model,",
"self.scheme.get_acceleration() # TODO: only calculate reaction when user wants it",
"when user wants it # moved reaction computation to dynamic",
"list self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:,",
"when user wants it # if self.structure_model is not None:",
"dynamic analysis level # AK . this doesnt considers the",
"force, structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model) def",
"# self.dynamic_reaction[:, i] = self._compute_reaction() # reaction computed in dynamic",
"it # moved reaction computation to dynamic analysis level #",
"the support reaction check #if self.structure_model is not None: #",
"self.dynamic_reaction[:, i] = self._compute_reaction() # reaction computed in dynamic analysis",
"LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model):",
"self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration() # TODO: only calculate reaction",
"def _print_solver_info(self): print(\"Linear Solver\") def solve(self): # time loop for",
"= self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration()",
"# time loop for i in range(0, len(self.array_time)): self.step =",
"range(0, len(self.array_time)): self.step = i current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time))",
"structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model) def _print_solver_info(self):",
". this doesnt considers the support reaction check #if self.structure_model",
"wants it # if self.structure_model is not None: # self.dynamic_reaction[:,",
"None: # self.dynamic_reaction[:, i] = self._compute_reaction() # reaction computed in",
"this doesnt considers the support reaction check #if self.structure_model is",
"source.solving_strategies.strategies.solver import Solver class LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme, dt,",
"i in range(0, len(self.array_time)): self.step = i current_time = self.array_time[i]",
"in range(0, len(self.array_time)): self.step = i current_time = self.array_time[i] #print(\"time:",
"dt, comp_model, initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions,",
"moved reaction computation to dynamic analysis level # AK .",
"self._compute_reaction() # reaction computed in dynamic analysis # TODO: only",
"appending results to the list self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:,",
"= self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration() # TODO: only calculate",
"= self.scheme.get_acceleration() # TODO: only calculate reaction when user wants",
"print(\"Linear Solver\") def solve(self): # time loop for i in",
"doesnt considers the support reaction check #if self.structure_model is not",
"#if self.structure_model is not None: # self.dynamic_reaction[:, i] = self._compute_reaction()",
"= self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending results to",
"len(self.array_time)): self.step = i current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:,",
"it # if self.structure_model is not None: # self.dynamic_reaction[:, i]",
"initial_conditions, force, structure_model) def _print_solver_info(self): print(\"Linear Solver\") def solve(self): #",
"time loop for i in range(0, len(self.array_time)): self.step = i",
"__init__(self, array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme,",
"considers the support reaction check #if self.structure_model is not None:",
"time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model) def _print_solver_info(self): print(\"Linear Solver\")",
"self.step = i current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i])",
"i] = self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration() # TODO: only",
"None: # self.dynamic_reaction[:, i] = self._compute_reaction() # update results self.scheme.update()",
"self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration() #",
"support reaction check #if self.structure_model is not None: # self.dynamic_reaction[:,",
"not None: # self.dynamic_reaction[:, i] = self._compute_reaction() # reaction computed",
"self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:, i]",
"i] = self._compute_reaction() # reaction computed in dynamic analysis #",
"to the list self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:, i] =",
"i] = self.scheme.get_acceleration() # TODO: only calculate reaction when user",
"is not None: # self.dynamic_reaction[:, i] = self._compute_reaction() # reaction",
"= i current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) #",
"TODO: only calculate reaction when user wants it # moved",
"level # AK . this doesnt considers the support reaction",
"Solver\") def solve(self): # time loop for i in range(0,",
"comp_model, initial_conditions, force, structure_model) def _print_solver_info(self): print(\"Linear Solver\") def solve(self):",
"only calculate reaction when user wants it # moved reaction",
"reaction check #if self.structure_model is not None: # self.dynamic_reaction[:, i]",
"force, structure_model) def _print_solver_info(self): print(\"Linear Solver\") def solve(self): # time",
"reaction computation to dynamic analysis level # AK . this",
"the list self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:, i] = self.scheme.get_velocity()",
"in dynamic analysis # TODO: only calculate reaction when user",
"user wants it # moved reaction computation to dynamic analysis",
"current_time = self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending results",
"from source.solving_strategies.strategies.solver import Solver class LinearSolver(Solver): def __init__(self, array_time, time_integration_scheme,",
"{0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending results to the list self.displacement[:,",
"user wants it # if self.structure_model is not None: #",
"for i in range(0, len(self.array_time)): self.step = i current_time =",
"i]) # appending results to the list self.displacement[:, i] =",
"# if self.structure_model is not None: # self.dynamic_reaction[:, i] =",
"is not None: # self.dynamic_reaction[:, i] = self._compute_reaction() # update",
"self.acceleration[:, i] = self.scheme.get_acceleration() # TODO: only calculate reaction when",
"TODO: only calculate reaction when user wants it # if",
"calculate reaction when user wants it # if self.structure_model is",
"self.structure_model is not None: # self.dynamic_reaction[:, i] = self._compute_reaction() #",
"AK . this doesnt considers the support reaction check #if",
"initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model)",
"if self.structure_model is not None: # self.dynamic_reaction[:, i] = self._compute_reaction()",
"= self._compute_reaction() # reaction computed in dynamic analysis # TODO:",
"# AK . this doesnt considers the support reaction check",
"wants it # moved reaction computation to dynamic analysis level",
"check #if self.structure_model is not None: # self.dynamic_reaction[:, i] =",
"structure_model) def _print_solver_info(self): print(\"Linear Solver\") def solve(self): # time loop",
"def solve(self): # time loop for i in range(0, len(self.array_time)):",
"comp_model, initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions, force,",
"results to the list self.displacement[:, i] = self.scheme.get_displacement() self.velocity[:, i]",
"analysis # TODO: only calculate reaction when user wants it",
"analysis level # AK . this doesnt considers the support",
"# reaction computed in dynamic analysis # TODO: only calculate",
"calculate reaction when user wants it # moved reaction computation",
"_print_solver_info(self): print(\"Linear Solver\") def solve(self): # time loop for i",
"loop for i in range(0, len(self.array_time)): self.step = i current_time",
"# moved reaction computation to dynamic analysis level # AK",
"self.scheme.solve_single_step(self.force[:, i]) # appending results to the list self.displacement[:, i]",
"reaction when user wants it # if self.structure_model is not",
"time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme, dt, comp_model,",
"dynamic analysis # TODO: only calculate reaction when user wants",
"# appending results to the list self.displacement[:, i] = self.scheme.get_displacement()",
"#print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending results to the list",
"super().__init__(array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model) def _print_solver_info(self): print(\"Linear",
"to dynamic analysis level # AK . this doesnt considers",
"self.array_time[i] #print(\"time: {0:.2f}\".format(current_time)) self.scheme.solve_single_step(self.force[:, i]) # appending results to the",
"self.velocity[:, i] = self.scheme.get_velocity() self.acceleration[:, i] = self.scheme.get_acceleration() # TODO:",
"only calculate reaction when user wants it # if self.structure_model",
"computed in dynamic analysis # TODO: only calculate reaction when",
"array_time, time_integration_scheme, dt, comp_model, initial_conditions, force, structure_model): super().__init__(array_time, time_integration_scheme, dt,"
] |
[
"1, 719662, tzinfo=utc), verbose_name='order date'), ), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017,",
"11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField(",
"-*- # Generated by Django 1.11.7 on 2017-11-24 16:22 from",
"Generated by Django 1.11.7 on 2017-11-24 16:22 from __future__ import",
"-*- coding: utf-8 -*- # Generated by Django 1.11.7 on",
"model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc),",
"migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies =",
"migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662,",
"import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies",
"16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'), ), migrations.AlterField( model_name='history_order',",
"[ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1,",
"models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [",
"[ ('payment', '0001_initial'), ] operations = [ migrations.AlterField( model_name='history_order', name='dead_date',",
"<gh_stars>0 # -*- coding: utf-8 -*- # Generated by Django",
"field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'),",
"field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'), ),",
"django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payment', '0001_initial'),",
"name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'),",
"] operations = [ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24,",
"24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'), ), migrations.AlterField(",
"= [ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22,",
"# -*- coding: utf-8 -*- # Generated by Django 1.11.7",
"class Migration(migrations.Migration): dependencies = [ ('payment', '0001_initial'), ] operations =",
"), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1,",
"tzinfo=utc), verbose_name='order date'), ), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24,",
"verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22,",
"by Django 1.11.7 on 2017-11-24 16:22 from __future__ import unicode_literals",
"1, 719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11,",
"import unicode_literals import datetime from django.db import migrations, models from",
"from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('payment',",
"import utc class Migration(migrations.Migration): dependencies = [ ('payment', '0001_initial'), ]",
"dependencies = [ ('payment', '0001_initial'), ] operations = [ migrations.AlterField(",
"utc class Migration(migrations.Migration): dependencies = [ ('payment', '0001_initial'), ] operations",
"from django.db import migrations, models from django.utils.timezone import utc class",
"719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24,",
"on 2017-11-24 16:22 from __future__ import unicode_literals import datetime from",
"verbose_name='order date'), ), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16,",
"), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1,",
"coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-24",
"field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'), ),",
"= [ ('payment', '0001_initial'), ] operations = [ migrations.AlterField( model_name='history_order',",
"datetime from django.db import migrations, models from django.utils.timezone import utc",
"import datetime from django.db import migrations, models from django.utils.timezone import",
"model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc),",
"# Generated by Django 1.11.7 on 2017-11-24 16:22 from __future__",
"11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'), ), ]",
"Migration(migrations.Migration): dependencies = [ ('payment', '0001_initial'), ] operations = [",
"name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758, tzinfo=utc), verbose_name='valid_date'),",
"__future__ import unicode_literals import datetime from django.db import migrations, models",
"utf-8 -*- # Generated by Django 1.11.7 on 2017-11-24 16:22",
"model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc),",
"24, 16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order',",
"migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719758,",
"migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719840,",
"2017-11-24 16:22 from __future__ import unicode_literals import datetime from django.db",
"django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration):",
"22, 1, 719662, tzinfo=utc), verbose_name='order date'), ), migrations.AlterField( model_name='history_order', name='valid_date',",
"'0001_initial'), ] operations = [ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11,",
"719662, tzinfo=utc), verbose_name='order date'), ), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11,",
"tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16,",
"16:22 from __future__ import unicode_literals import datetime from django.db import",
"Django 1.11.7 on 2017-11-24 16:22 from __future__ import unicode_literals import",
"1.11.7 on 2017-11-24 16:22 from __future__ import unicode_literals import datetime",
"from __future__ import unicode_literals import datetime from django.db import migrations,",
"operations = [ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16,",
"('payment', '0001_initial'), ] operations = [ migrations.AlterField( model_name='history_order', name='dead_date', field=models.DateTimeField(default=datetime.datetime(2017,",
"16, 22, 1, 719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date',",
"22, 1, 719840, tzinfo=utc), verbose_name='daed_date'), ), migrations.AlterField( model_name='history_order', name='order_date', field=models.DateTimeField(default=datetime.datetime(2017,",
"name='order_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order",
"11, 24, 16, 22, 1, 719662, tzinfo=utc), verbose_name='order date'), ),",
"unicode_literals import datetime from django.db import migrations, models from django.utils.timezone",
"date'), ), migrations.AlterField( model_name='history_order', name='valid_date', field=models.DateTimeField(default=datetime.datetime(2017, 11, 24, 16, 22,"
] |
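For context, a minimal sketch of the kind of model this migration could apply to. The HistoryOrder class below is an assumption for illustration only: the real payment/models.py is not part of this entry, and the callable defaults shown here are illustrative rather than the project's actual defaults.

# payment/models.py -- hypothetical sketch, not the project's actual model.
from django.db import models
from django.utils import timezone


class HistoryOrder(models.Model):
    # Field names mirror the AlterField operations above; defaults are illustrative.
    order_date = models.DateTimeField(default=timezone.now, verbose_name='order date')
    valid_date = models.DateTimeField(default=timezone.now, verbose_name='valid_date')
    dead_date = models.DateTimeField(default=timezone.now, verbose_name='dead_date')

After changing fields like these, running `python manage.py makemigrations payment` produces an AlterField migration of the kind shown above.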
[
"\"join_clause\" pre_segment_identifier = (\"name\", \"using\") post_segment_identifier = (\"type\", \"bracketed\") expand_children",
":force: SELECT b FROM foo LEFT JOIN zoo USING•(a) \"\"\"",
"\"\"\"Implementation of Rule L024.\"\"\" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023",
"import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace expected after USING",
"space. | Add a space after USING, to avoid confusing",
"FROM foo LEFT JOIN zoo USING(a) | **Best practice** |",
"code-block:: sql SELECT b FROM foo LEFT JOIN zoo USING(a)",
"of Rule L024.\"\"\" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import",
"JOIN zoo USING(a) | **Best practice** | The • character",
"zoo USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier = (\"name\", \"using\")",
"| The • character represents a space. | Add a",
"document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace",
"sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace expected after",
"sql :force: SELECT b FROM foo LEFT JOIN zoo USING•(a)",
"LEFT JOIN zoo USING(a) | **Best practice** | The •",
"represents a space. | Add a space after USING, to",
"| **Anti-pattern** .. code-block:: sql SELECT b FROM foo LEFT",
"\"using\") post_segment_identifier = (\"type\", \"bracketed\") expand_children = None allow_newline =",
"practice** | The • character represents a space. | Add",
"USING in JOIN clause. | **Anti-pattern** .. code-block:: sql SELECT",
"| for a function. .. code-block:: sql :force: SELECT b",
"post_segment_identifier = (\"type\", \"bracketed\") expand_children = None allow_newline = True",
"(\"name\", \"using\") post_segment_identifier = (\"type\", \"bracketed\") expand_children = None allow_newline",
"sql SELECT b FROM foo LEFT JOIN zoo USING(a) |",
"\"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier = (\"name\", \"using\") post_segment_identifier =",
"JOIN zoo USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier = (\"name\",",
"clause. | **Anti-pattern** .. code-block:: sql SELECT b FROM foo",
"Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace expected after USING in",
"it | for a function. .. code-block:: sql :force: SELECT",
"import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single",
"a space after USING, to avoid confusing it | for",
"**Best practice** | The • character represents a space. |",
"code-block:: sql :force: SELECT b FROM foo LEFT JOIN zoo",
"Rule_L024(Rule_L023): \"\"\"Single whitespace expected after USING in JOIN clause. |",
"<gh_stars>1000+ \"\"\"Implementation of Rule L024.\"\"\" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from",
"for a function. .. code-block:: sql :force: SELECT b FROM",
"Rule L024.\"\"\" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023",
"class Rule_L024(Rule_L023): \"\"\"Single whitespace expected after USING in JOIN clause.",
"SELECT b FROM foo LEFT JOIN zoo USING(a) | **Best",
"@document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace expected after USING in JOIN",
"in JOIN clause. | **Anti-pattern** .. code-block:: sql SELECT b",
"a space. | Add a space after USING, to avoid",
"pre_segment_identifier = (\"name\", \"using\") post_segment_identifier = (\"type\", \"bracketed\") expand_children =",
"| **Best practice** | The • character represents a space.",
".. code-block:: sql SELECT b FROM foo LEFT JOIN zoo",
"= \"join_clause\" pre_segment_identifier = (\"name\", \"using\") post_segment_identifier = (\"type\", \"bracketed\")",
"The • character represents a space. | Add a space",
"a function. .. code-block:: sql :force: SELECT b FROM foo",
"from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class",
"b FROM foo LEFT JOIN zoo USING(a) | **Best practice**",
"to avoid confusing it | for a function. .. code-block::",
"whitespace expected after USING in JOIN clause. | **Anti-pattern** ..",
"zoo USING(a) | **Best practice** | The • character represents",
"SELECT b FROM foo LEFT JOIN zoo USING•(a) \"\"\" expected_mother_segment_type",
"| Add a space after USING, to avoid confusing it",
"**Anti-pattern** .. code-block:: sql SELECT b FROM foo LEFT JOIN",
"b FROM foo LEFT JOIN zoo USING•(a) \"\"\" expected_mother_segment_type =",
"FROM foo LEFT JOIN zoo USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\"",
"sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023):",
"from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible class Rule_L024(Rule_L023): \"\"\"Single whitespace expected",
"USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier = (\"name\", \"using\") post_segment_identifier",
"foo LEFT JOIN zoo USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier",
"function. .. code-block:: sql :force: SELECT b FROM foo LEFT",
"= (\"name\", \"using\") post_segment_identifier = (\"type\", \"bracketed\") expand_children = None",
"USING(a) | **Best practice** | The • character represents a",
"Add a space after USING, to avoid confusing it |",
"after USING in JOIN clause. | **Anti-pattern** .. code-block:: sql",
"confusing it | for a function. .. code-block:: sql :force:",
"space after USING, to avoid confusing it | for a",
"character represents a space. | Add a space after USING,",
"USING, to avoid confusing it | for a function. ..",
"LEFT JOIN zoo USING•(a) \"\"\" expected_mother_segment_type = \"join_clause\" pre_segment_identifier =",
"after USING, to avoid confusing it | for a function.",
"foo LEFT JOIN zoo USING(a) | **Best practice** | The",
"• character represents a space. | Add a space after",
"expected after USING in JOIN clause. | **Anti-pattern** .. code-block::",
".. code-block:: sql :force: SELECT b FROM foo LEFT JOIN",
"JOIN clause. | **Anti-pattern** .. code-block:: sql SELECT b FROM",
"expected_mother_segment_type = \"join_clause\" pre_segment_identifier = (\"name\", \"using\") post_segment_identifier = (\"type\",",
"avoid confusing it | for a function. .. code-block:: sql",
"\"\"\"Single whitespace expected after USING in JOIN clause. | **Anti-pattern**",
"L024.\"\"\" from sqlfluff.core.rules.doc_decorators import document_fix_compatible from sqlfluff.rules.L023 import Rule_L023 @document_fix_compatible"
] |
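A hedged usage sketch of how a rule like this is exercised. It assumes sqlfluff's simple Python API (`sqlfluff.lint`) from roughly the same release line as this rule module; the query string is made up to reproduce the anti-pattern in the docstring.

# Hypothetical usage sketch: lint a query with no space after USING.
# Assumes the sqlfluff simple API (sqlfluff.lint) is available.
import sqlfluff

query = "SELECT b FROM foo LEFT JOIN zoo USING(a)\n"

# Each violation describes a rule code and position; L024 should be among them.
for violation in sqlfluff.lint(query):
    print(violation)

Running `sqlfluff fix` on the same query would then insert the missing space, since the class is marked @document_fix_compatible.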
[
"import numpy as np from astropy.table import Table import matplotlib.pyplot",
"vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s)",
"yms - 1, c='brown', label='1 mag above the median', linewidth=1,",
"ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k') # K",
"%d components.'%len(comps)) fig=plt.figure() for i, c in enumerate(comps): ax =",
"from astropy.table import Table import matplotlib.pyplot as plt import matplotlib.cm",
"# Main sequence parametrization # fitpar for pmag, rpmag fitpar",
"are the same for all the plotting scripts and we",
"ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k') # M",
"color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting",
"mag above the median', linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5,",
"comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col > pmin_membership t=tab[mask]",
"tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0",
"* 1e-3) / 10) # tab['parallax'] in micro arcsec tab0['Gmag']",
"-12.25864507] poly = np.poly1d(fitpar) x = np.linspace(1, 4, 100) y",
"the same for all the plotting scripts and we put",
"things are the same for all the plotting scripts and",
"<reponame>mikeireland/chronostar \"\"\" Plot CMDs for each component. \"\"\" import numpy",
"lib.compnames colors = lib.colors ############################################ # Minimal probability required for",
"- 1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--')",
"linestyle='--') ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the",
"Minimal probability required for membership pmin_membership = 0.5 ############################################ #",
"1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369,",
"-3] ############################################ # Read data try: tab = tab0 comps",
"c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet)",
"lib data_filename = lib.data_filename comps_filename = lib.comps_filename compnames = lib.compnames",
"i, c in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1) #",
"Some things are the same for all the plotting scripts",
"-2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar) x =",
"= Gmag comps0 = Table.read(comps_filename) tab = tab0 comps =",
"linewidth=1) ax.plot(xms, yms - 1, c='brown', label='1 mag above the",
"enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this",
"linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax",
"ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim,",
"= y > 4 yms = y[m] xms = x[m]",
"label='Median main sequence', linewidth=1) ax.plot(xms, yms - 1, c='brown', label='1",
"fig=plt.figure() for i, c in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1],",
"Table import matplotlib.pyplot as plt import matplotlib.cm as cm plt.ion()",
"the plotting scripts and we put # this into a",
"ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d components.'%len(comps)) fig=plt.figure()",
"38.31330583, -12.25864507] poly = np.poly1d(fitpar) x = np.linspace(1, 4, 100)",
"in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust",
"split subplots grid = [5, 5] # CMD limits xlim",
"comps0 = Table.read(comps_filename) tab = tab0 comps = comps0 #",
"Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t))) #~ plt.tight_layout() plt.show()",
"= poly(x) m = y > 4 yms = y[m]",
"= [17, -3] ############################################ # Read data try: tab =",
"alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha)",
"fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly =",
"ax.plot(xms, yms - 1, c='brown', label='1 mag above the median',",
"# F ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k')",
"# TODO: adjust this if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID]",
"ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above the median',",
"# Some things are the same for all the plotting",
"label='1.5 mag above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k')",
"avoid confusion. import scocenlib as lib data_filename = lib.data_filename comps_filename",
"pmag, rpmag fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507]",
"t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~",
"############################################ # Some things are the same for all the",
"# this into a single library to avoid confusion. import",
"y = poly(x) m = y > 4 yms =",
"ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k') # G",
"= lib.comps_filename compnames = lib.compnames colors = lib.colors ############################################ #",
"data_filename = lib.data_filename comps_filename = lib.comps_filename compnames = lib.compnames colors",
"Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] *",
"= col > pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1",
"plotting scripts and we put # this into a single",
"main sequence', linewidth=1) ax.plot(xms, yms - 1, c='brown', label='1 mag",
"vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr",
"for all the plotting scripts and we put # this",
"tab0 comps = comps0 except: tab0 = Table.read(data_filename) Gmag =",
"matplotlib.cm as cm plt.ion() # Pretty plots from fig_settings import",
"limits xlim = [-1, 5] ylim = [17, -3] ############################################",
"for each component. \"\"\" import numpy as np from astropy.table",
"age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t)))",
"for membership pmin_membership = 0.5 ############################################ # how to split",
"1, c='brown', label='1 mag above the median', linewidth=1, linestyle='--') ax.plot(xms,",
"ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return",
"= [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar)",
"color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1])",
"- 1.5, c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--')",
"[5, 5] # CMD limits xlim = [-1, 5] ylim",
"import matplotlib.pyplot as plt import matplotlib.cm as cm plt.ion() #",
"the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767,",
"ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1) ax.plot(xms, yms -",
"colors = lib.colors ############################################ # Minimal probability required for membership",
"import matplotlib.cm as cm plt.ion() # Pretty plots from fig_settings",
"into a single library to avoid confusion. import scocenlib as",
"/ (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro",
"t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5,",
"x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown', label='Median main",
"= [5, 5] # CMD limits xlim = [-1, 5]",
"\"\"\" import numpy as np from astropy.table import Table import",
"# fitpar for pmag, rpmag fitpar = [0.17954163, -2.48748376, 12.9279348,",
"for pmag, rpmag fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583,",
"ylim[1]) return ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for i, c",
"as np from astropy.table import Table import matplotlib.pyplot as plt",
"library to avoid confusion. import scocenlib as lib data_filename =",
"matplotlib.pyplot as plt import matplotlib.cm as cm plt.ion() # Pretty",
"micro arcsec tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename) tab =",
"= lib.colors ############################################ # Minimal probability required for membership pmin_membership",
"Main sequence parametrization # fitpar for pmag, rpmag fitpar =",
"rpmag fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly",
"= [-1, 5] ylim = [17, -3] ############################################ # Read",
"4, 100) y = poly(x) m = y > 4",
"scocenlib as lib data_filename = lib.data_filename comps_filename = lib.comps_filename compnames",
"else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k',",
"K ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1])",
"np from astropy.table import Table import matplotlib.pyplot as plt import",
"* np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax']",
"as cm plt.ion() # Pretty plots from fig_settings import *",
"if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col >",
"ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for i,",
"= Table.read(comps_filename) tab = tab0 comps = comps0 # Main",
"= tab0 comps = comps0 except: tab0 = Table.read(data_filename) Gmag",
"return ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for i, c in",
"subplots grid = [5, 5] # CMD limits xlim =",
"ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age,",
"in micro arcsec tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename) tab",
"try: tab = tab0 comps = comps0 except: tab0 =",
"# Read data try: tab = tab0 comps = comps0",
"lib.colors ############################################ # Minimal probability required for membership pmin_membership =",
"same for all the plotting scripts and we put #",
"> 4 yms = y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax,",
"to split subplots grid = [5, 5] # CMD limits",
"required for membership pmin_membership = 0.5 ############################################ # how to",
"component. \"\"\" import numpy as np from astropy.table import Table",
"fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed comp_ID",
"median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5,",
"Table.read(comps_filename) tab = tab0 comps = comps0 # Main sequence",
"\"\"\" Plot CMDs for each component. \"\"\" import numpy as",
"c='brown', label='1.5 mag above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5,",
"color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5,",
"linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0],",
"ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t))) #~",
"alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax,",
"numpy as np from astropy.table import Table import matplotlib.pyplot as",
"poly = np.poly1d(fitpar) x = np.linspace(1, 4, 100) y =",
"ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'],",
"/ 10) # tab['parallax'] in micro arcsec tab0['Gmag'] = Gmag",
"xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for",
"pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse()",
"t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim)",
"> pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~",
"-31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar) x = np.linspace(1, 4,",
"linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979,",
"confusion. import scocenlib as lib data_filename = lib.data_filename comps_filename =",
"= fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if needed",
"sequence parametrization # fitpar for pmag, rpmag fitpar = [0.17954163,",
"= tab0 comps = comps0 # Main sequence parametrization #",
"mask = col > pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else:",
"parametrization # fitpar for pmag, rpmag fitpar = [0.17954163, -2.48748376,",
"tab = tab0 comps = comps0 # Main sequence parametrization",
"c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col > pmin_membership t=tab[mask] if len(t)>100:",
"comps = comps0 # Main sequence parametrization # fitpar for",
"arcsec tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename) tab = tab0",
"= lib.compnames colors = lib.colors ############################################ # Minimal probability required",
"= lib.data_filename comps_filename = lib.comps_filename compnames = lib.compnames colors =",
"12.9279348, -31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar) x = np.linspace(1,",
"yms = y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim):",
"4 yms = y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim,",
"y > 4 yms = y[m] xms = x[m] def",
"m = y > 4 yms = y[m] xms =",
"# how to split subplots grid = [5, 5] #",
"ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO: adjust this if",
"[0.17954163, -2.48748376, 12.9279348, -31.35434182, 38.31330583, -12.25864507] poly = np.poly1d(fitpar) x",
"# M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d",
"[-1, 5] ylim = [17, -3] ############################################ # Read data",
"np.poly1d(fitpar) x = np.linspace(1, 4, 100) y = poly(x) m",
"adjust this if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask =",
"= y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms,",
"= np.linspace(1, 4, 100) y = poly(x) m = y",
"xlim = [-1, 5] ylim = [17, -3] ############################################ #",
"ylim = [17, -3] ############################################ # Read data try: tab",
"5] # CMD limits xlim = [-1, 5] ylim =",
"linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k') #",
"import Table import matplotlib.pyplot as plt import matplotlib.cm as cm",
"#~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'],",
"x = np.linspace(1, 4, 100) y = poly(x) m =",
"0.5 ############################################ # how to split subplots grid = [5,",
"membership pmin_membership = 0.5 ############################################ # how to split subplots",
"10) # tab['parallax'] in micro arcsec tab0['Gmag'] = Gmag comps0",
"linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag above",
"comps0 except: tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5",
"TODO: adjust this if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask",
"linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848,",
"plt import matplotlib.cm as cm plt.ion() # Pretty plots from",
"y[m] xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms,",
"def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown', label='Median main sequence',",
"# G ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k')",
"# Pretty plots from fig_settings import * ############################################ # Some",
"c='brown', label='Median main sequence', linewidth=1) ax.plot(xms, yms - 1, c='brown',",
"above the median', linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5, c='brown',",
"tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename) tab = tab0 comps",
"and we put # this into a single library to",
"c in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1) # TODO:",
"yms, c='brown', label='Median main sequence', linewidth=1) ax.plot(xms, yms - 1,",
"Gmag comps0 = Table.read(comps_filename) tab = tab0 comps = comps0",
"how to split subplots grid = [5, 5] # CMD",
"comps = comps0 except: tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag']",
"len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'],",
"- 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10)",
"except: tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5 *",
"M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) return ax print('Plotting %d components.'%len(comps))",
"as plt import matplotlib.cm as cm plt.ion() # Pretty plots",
"components.'%len(comps)) fig=plt.figure() for i, c in enumerate(comps): ax = fig.add_subplot(grid[0],",
"ylim): ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1) ax.plot(xms, yms",
"5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) #",
"ax print('Plotting %d components.'%len(comps)) fig=plt.figure() for i, c in enumerate(comps):",
"grid = [5, 5] # CMD limits xlim = [-1,",
"100) y = poly(x) m = y > 4 yms",
"= c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col > pmin_membership t=tab[mask] if",
"grid[1], i+1) # TODO: adjust this if needed comp_ID =",
"############################################ # Minimal probability required for membership pmin_membership = 0.5",
"5] ylim = [17, -3] ############################################ # Read data try:",
"this if needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col",
"= tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3)",
"mag above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') #",
"# tab['parallax'] in micro arcsec tab0['Gmag'] = Gmag comps0 =",
"for i, c in enumerate(comps): ax = fig.add_subplot(grid[0], grid[1], i+1)",
"from fig_settings import * ############################################ # Some things are the",
"to avoid confusion. import scocenlib as lib data_filename = lib.data_filename",
"c='brown', label='1 mag above the median', linewidth=1, linestyle='--') ax.plot(xms, yms",
"= np.poly1d(fitpar) x = np.linspace(1, 4, 100) y = poly(x)",
"lib.comps_filename compnames = lib.compnames colors = lib.colors ############################################ # Minimal",
"import * ############################################ # Some things are the same for",
"cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID,",
"print('Plotting %d components.'%len(comps)) fig=plt.figure() for i, c in enumerate(comps): ax",
"fitpar for pmag, rpmag fitpar = [0.17954163, -2.48748376, 12.9279348, -31.35434182,",
"scripts and we put # this into a single library",
"alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f",
"pmin_membership = 0.5 ############################################ # how to split subplots grid",
"single library to avoid confusion. import scocenlib as lib data_filename",
"[17, -3] ############################################ # Read data try: tab = tab0",
"plt.ion() # Pretty plots from fig_settings import * ############################################ #",
"xms = x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown',",
"alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1,",
"############################################ # Read data try: tab = tab0 comps =",
"comps0 # Main sequence parametrization # fitpar for pmag, rpmag",
"col=tab['membership%s'%comp_ID] mask = col > pmin_membership t=tab[mask] if len(t)>100: alpha=0.5",
"# CMD limits xlim = [-1, 5] ylim = [17,",
"Read data try: tab = tab0 comps = comps0 except:",
"needed comp_ID = c['comp_ID'] col=tab['membership%s'%comp_ID] mask = col > pmin_membership",
"as lib data_filename = lib.data_filename comps_filename = lib.comps_filename compnames =",
"median', linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5, c='brown', label='1.5 mag",
"1e-3) / 10) # tab['parallax'] in micro arcsec tab0['Gmag'] =",
"tab0 comps = comps0 # Main sequence parametrization # fitpar",
"plots from fig_settings import * ############################################ # Some things are",
"CMD limits xlim = [-1, 5] ylim = [17, -3]",
"s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age']",
"probability required for membership pmin_membership = 0.5 ############################################ # how",
"sequence', linewidth=1) ax.plot(xms, yms - 1, c='brown', label='1 mag above",
"all the plotting scripts and we put # this into",
"= 0.5 ############################################ # how to split subplots grid =",
"fig_settings import * ############################################ # Some things are the same",
"put # this into a single library to avoid confusion.",
"tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax'] * 1e-3) /",
"import scocenlib as lib data_filename = lib.data_filename comps_filename = lib.comps_filename",
"ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1,",
"* ############################################ # Some things are the same for all",
"= comps0 # Main sequence parametrization # fitpar for pmag,",
"Pretty plots from fig_settings import * ############################################ # Some things",
"astropy.table import Table import matplotlib.pyplot as plt import matplotlib.cm as",
"plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1)",
"= Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 /",
"# Minimal probability required for membership pmin_membership = 0.5 ############################################",
"above the median', linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F",
"t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1,",
"# K ax.axvline(x=1.848, linewidth=0.5, color='k') # M ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0],",
"the median', linewidth=1, linestyle='--') ax.plot(xms, yms - 1.5, c='brown', label='1.5",
"a single library to avoid confusion. import scocenlib as lib",
"Plot CMDs for each component. \"\"\" import numpy as np",
"############################################ # how to split subplots grid = [5, 5]",
"each component. \"\"\" import numpy as np from astropy.table import",
"lib.data_filename comps_filename = lib.comps_filename compnames = lib.compnames colors = lib.colors",
"(tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in micro arcsec",
"compnames = lib.compnames colors = lib.colors ############################################ # Minimal probability",
"linewidth=1, linestyle='--') ax.axvline(x=0.369, linewidth=0.5, color='k') # F ax.axvline(x=0.767, linewidth=0.5, color='k')",
"data try: tab = tab0 comps = comps0 except: tab0",
"xlim, ylim): ax.plot(xms, yms, c='brown', label='Median main sequence', linewidth=1) ax.plot(xms,",
"#~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID],",
"color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5,",
"= x[m] def plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim): ax.plot(xms, yms, c='brown', label='Median",
"label='1 mag above the median', linewidth=1, linestyle='--') ax.plot(xms, yms -",
"np.log10(1.0 / (tab0['parallax'] * 1e-3) / 10) # tab['parallax'] in",
"np.linspace(1, 4, 100) y = poly(x) m = y >",
"tab = tab0 comps = comps0 except: tab0 = Table.read(data_filename)",
"i+1) # TODO: adjust this if needed comp_ID = c['comp_ID']",
"Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] - 5 * np.log10(1.0 / (tab0['parallax']",
"t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'], t['Gmag'], s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'],",
"(%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'], c['Age_reliable'], len(t))) #~ plt.tight_layout()",
"G ax.axvline(x=0.979, linewidth=0.5, color='k') # K ax.axvline(x=1.848, linewidth=0.5, color='k') #",
"we put # this into a single library to avoid",
"= comps0 except: tab0 = Table.read(data_filename) Gmag = tab0['phot_g_mean_mag'] -",
"cm plt.ion() # Pretty plots from fig_settings import * ############################################",
"F ax.axvline(x=0.767, linewidth=0.5, color='k') # G ax.axvline(x=0.979, linewidth=0.5, color='k') #",
"CMDs for each component. \"\"\" import numpy as np from",
"yms - 1.5, c='brown', label='1.5 mag above the median', linewidth=1,",
"comps_filename = lib.comps_filename compnames = lib.compnames colors = lib.colors ############################################",
"s=1, c='k', alpha=alpha) ax.scatter(t['bp_rp'], t['Gmag'], s=1, c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1,",
"c=t['membership%s'%comp_ID], alpha=1, vmin=0.5, vmax=1, cmap=cm.jet) ax=plot_MS_parametrisation_and_spectral_types(ax, xlim, ylim) age=c['Age'] ax.set_title('%s",
"poly(x) m = y > 4 yms = y[m] xms",
"if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID) #~ t.reverse() #~ ax.scatter(t['bp_rp'],",
"xlim, ylim) age=c['Age'] ax.set_title('%s (%.2f$\\pm$%.2f Myr %s) %d'%(comp_ID, age, c['Crossing_time'],",
"tab['parallax'] in micro arcsec tab0['Gmag'] = Gmag comps0 = Table.read(comps_filename)",
"col > pmin_membership t=tab[mask] if len(t)>100: alpha=0.5 else: alpha=1 t.sort('membership%s'%comp_ID)",
"this into a single library to avoid confusion. import scocenlib"
] |
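The Gmag column built above is simply the distance modulus applied to the Gaia G-band apparent magnitude. A minimal sketch of that conversion on made-up numbers, assuming the parallax column is in milliarcseconds (as the 1e-3 factor in the script suggests):

# Illustrative values only; column names follow the script above.
import numpy as np

phot_g_mean_mag = np.array([12.3, 14.1])  # apparent G magnitudes (made up)
parallax = np.array([7.5, 2.1])           # parallaxes in mas (made up)

distance_pc = 1.0 / (parallax * 1e-3)                      # parallax [arcsec] -> distance [pc]
Gmag = phot_g_mean_mag - 5 * np.log10(distance_pc / 10)    # absolute magnitude via distance modulus
print(Gmag)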